-rw-r--r--  bolt/test/binary-analysis/AArch64/cmdline-args.test | 11
-rw-r--r--  bolt/test/lsda-section-name.cpp | 4
-rw-r--r--  clang-tools-extra/clang-apply-replacements/tool/ClangApplyReplacementsMain.cpp | 3
-rw-r--r--  clang-tools-extra/clang-change-namespace/tool/ClangChangeNamespace.cpp | 5
-rw-r--r--  clang-tools-extra/clang-include-fixer/tool/ClangIncludeFixer.cpp | 2
-rw-r--r--  clang-tools-extra/clang-move/tool/ClangMove.cpp | 5
-rw-r--r--  clang-tools-extra/clang-reorder-fields/tool/ClangReorderFields.cpp | 5
-rw-r--r--  clang-tools-extra/clang-tidy/ClangTidy.cpp | 5
-rw-r--r--  clang-tools-extra/clang-tidy/ExpandModularHeadersPPCallbacks.cpp | 2
-rw-r--r--  clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp | 3
-rw-r--r--  clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt | 1
-rw-r--r--  clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp | 180
-rw-r--r--  clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h | 31
-rw-r--r--  clang-tools-extra/clang-tidy/plugin/ClangTidyPlugin.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/Preamble.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/SystemIncludeExtractor.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp | 93
-rw-r--r--  clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp | 2
-rw-r--r--  clang-tools-extra/clangd/unittests/tweaks/TweakTests.cpp | 2
-rw-r--r--  clang-tools-extra/docs/ReleaseNotes.rst | 6
-rw-r--r--  clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst | 72
-rw-r--r--  clang-tools-extra/docs/clang-tidy/checks/list.rst | 1
-rw-r--r--  clang-tools-extra/modularize/ModularizeUtilities.cpp | 2
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/basic.cpp | 20
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/conflict.cpp | 20
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/crlf.cpp | 10
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/format-header.cpp | 20
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/format.cpp | 18
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/identical-in-TU.cpp | 12
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/identical.cpp | 12
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/ignore-conflict.cpp | 10
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/invalid-files.cpp | 8
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/order-dependent.cpp | 14
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/relative-paths.cpp | 14
-rw-r--r--  clang-tools-extra/test/clang-apply-replacements/yml-basic.cpp | 20
-rw-r--r--  clang-tools-extra/test/clang-change-namespace/allow-list.cpp | 4
-rw-r--r--  clang-tools-extra/test/clang-change-namespace/macro.cpp | 21
-rw-r--r--  clang-tools-extra/test/clang-include-fixer/include_path.cpp | 28
-rw-r--r--  clang-tools-extra/test/clang-include-fixer/multiple_fixes.cpp | 12
-rw-r--r--  clang-tools-extra/test/clang-include-fixer/yamldb_autodetect.cpp | 6
-rw-r--r--  clang-tools-extra/test/clang-move/move-class.cpp | 42
-rw-r--r--  clang-tools-extra/test/clang-move/move-enum-decl.cpp | 32
-rw-r--r--  clang-tools-extra/test/clang-move/move-function.cpp | 42
-rw-r--r--  clang-tools-extra/test/clang-move/move-multiple-classes.cpp | 18
-rw-r--r--  clang-tools-extra/test/clang-move/move-template-class.cpp | 28
-rw-r--r--  clang-tools-extra/test/clang-move/move-type-alias.cpp | 40
-rw-r--r--  clang-tools-extra/test/clang-move/move-used-helper-decls.cpp | 98
-rw-r--r--  clang-tools-extra/test/clang-move/move-var.cpp | 36
-rw-r--r--  clang-tools-extra/test/clang-move/no-move-macro-helpers.cpp | 32
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.c | 54
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp | 145
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/misc/header-include-cycle.cpp | 20
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/misc/unused-parameters.cpp | 12
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/modernize/concat-nested-namespaces.cpp | 13
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-header.cpp | 7
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-multi-fixes.cpp | 13
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/portability/restrict-system-includes-transitive.cpp | 12
-rw-r--r--  clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-symlink.cpp | 11
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-diff.cpp | 6
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-run-with-database.cpp | 38
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-store-check-profile-one-tu.cpp | 12
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/diagnostic.cpp | 18
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/export-relpath.cpp | 13
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/list-checks.cpp | 6
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/read_file_config.cpp | 12
-rw-r--r--  clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp | 16
-rw-r--r--  clang-tools-extra/test/modularize/NoProblemsAssistant.modularize | 4
-rw-r--r--  clang-tools-extra/unittests/clang-apply-replacements/ApplyReplacementsTest.cpp | 3
-rw-r--r--  clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp | 9
-rw-r--r--  clang-tools-extra/unittests/clang-tidy/ClangTidyTest.h | 3
-rw-r--r--  clang-tools-extra/unittests/include/common/VirtualFileHelper.h | 3
-rw-r--r--  clang/docs/InternalsManual.rst | 42
-rw-r--r--  clang/docs/ReleaseNotes.rst | 7
-rw-r--r--  clang/docs/ShadowCallStack.rst | 2
-rw-r--r--  clang/docs/ThinLTO.rst | 25
-rw-r--r--  clang/docs/analyzer/user-docs/CrossTranslationUnit.rst | 4
-rw-r--r--  clang/include/clang/Basic/Attr.td | 2
-rw-r--r--  clang/include/clang/Basic/BuiltinsAMDGPU.def | 17
-rw-r--r--  clang/include/clang/Basic/BuiltinsNVPTX.td | 11
-rw-r--r--  clang/include/clang/Basic/Cuda.h | 5
-rw-r--r--  clang/include/clang/Basic/CustomizableOptional.h | 11
-rw-r--r--  clang/include/clang/Basic/DiagnosticIDs.h | 5
-rw-r--r--  clang/include/clang/Basic/DiagnosticSemaKinds.td | 24
-rw-r--r--  clang/include/clang/Basic/OffloadArch.h | 4
-rw-r--r--  clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h | 4
-rw-r--r--  clang/include/clang/CIR/Dialect/IR/CIROps.td | 47
-rw-r--r--  clang/include/clang/CIR/MissingFeatures.h | 1
-rw-r--r--  clang/include/clang/Driver/Options.td | 2
-rw-r--r--  clang/include/clang/Frontend/ASTUnit.h | 8
-rw-r--r--  clang/include/clang/Frontend/CompilerInstance.h | 4
-rw-r--r--  clang/include/clang/Frontend/PrecompiledPreamble.h | 2
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp | 34
-rw-r--r--  clang/lib/AST/ByteCode/Interp.h | 8
-rw-r--r--  clang/lib/AST/ByteCode/Opcodes.td | 4
-rw-r--r--  clang/lib/AST/Expr.cpp | 14
-rw-r--r--  clang/lib/AST/ExprConstant.cpp | 17
-rw-r--r--  clang/lib/Analysis/RetainSummaryManager.cpp | 3
-rw-r--r--  clang/lib/Basic/Cuda.cpp | 6
-rw-r--r--  clang/lib/Basic/OffloadArch.cpp | 4
-rw-r--r--  clang/lib/Basic/SourceManager.cpp | 14
-rw-r--r--  clang/lib/Basic/Targets/NVPTX.cpp | 12
-rw-r--r--  clang/lib/Basic/Targets/WebAssembly.cpp | 23
-rw-r--r--  clang/lib/Basic/Targets/WebAssembly.h | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenCall.h | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 16
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 14
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 88
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenModule.cpp | 19
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 102
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 17
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 156
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp | 3
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp | 16
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.h | 1
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CodeGenAction.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CoverageMappingGen.cpp | 7
-rw-r--r--  clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp | 2
-rw-r--r--  clang/lib/CrossTU/CrossTranslationUnit.cpp | 9
-rw-r--r--  clang/lib/Driver/ToolChains/BareMetal.cpp | 3
-rw-r--r--  clang/lib/Driver/ToolChains/Clang.cpp | 38
-rw-r--r--  clang/lib/Driver/ToolChains/Cuda.cpp | 3
-rw-r--r--  clang/lib/Frontend/ASTMerge.cpp | 4
-rw-r--r--  clang/lib/Frontend/ASTUnit.cpp | 24
-rw-r--r--  clang/lib/Frontend/ChainedIncludesSource.cpp | 7
-rw-r--r--  clang/lib/Frontend/CompilerInstance.cpp | 19
-rw-r--r--  clang/lib/Frontend/CompilerInvocation.cpp | 4
-rw-r--r--  clang/lib/Frontend/FrontendAction.cpp | 10
-rw-r--r--  clang/lib/Frontend/PrecompiledPreamble.cpp | 20
-rw-r--r--  clang/lib/Frontend/SerializedDiagnosticPrinter.cpp | 5
-rw-r--r--  clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h | 2
-rw-r--r--  clang/lib/Headers/hlsl/hlsl_intrinsics.h | 2
-rw-r--r--  clang/lib/Interpreter/CodeCompletion.cpp | 2
-rw-r--r--  clang/lib/Interpreter/Interpreter.cpp | 6
-rw-r--r--  clang/lib/Sema/SemaAMDGPU.cpp | 10
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 4
-rw-r--r--  clang/lib/Sema/SemaCodeComplete.cpp | 8
-rw-r--r--  clang/lib/Sema/SemaDecl.cpp | 11
-rw-r--r--  clang/lib/Sema/SemaSYCL.cpp | 31
-rw-r--r--  clang/lib/Serialization/ASTReader.cpp | 3
-rw-r--r--  clang/lib/Serialization/ASTReaderDecl.cpp | 3
-rw-r--r--  clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 2
-rw-r--r--  clang/lib/Tooling/CompilationDatabase.cpp | 5
-rw-r--r--  clang/lib/Tooling/Core/Replacement.cpp | 7
-rw-r--r--  clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp | 4
-rw-r--r--  clang/lib/Tooling/Refactoring.cpp | 5
-rw-r--r--  clang/lib/Tooling/Tooling.cpp | 26
-rw-r--r--  clang/test/AST/ByteCode/codegen.cpp | 4
-rw-r--r--  clang/test/AST/ByteCode/functions.cpp | 24
-rw-r--r--  clang/test/AST/ByteCode/intap.cpp | 14
-rw-r--r--  clang/test/ASTSYCL/ast-dump-sycl-kernel-entry-point.cpp | 6
-rw-r--r--  clang/test/Analysis/builtin_assume.cpp | 13
-rw-r--r--  clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c | 73
-rw-r--r--  clang/test/CIR/CodeGen/complex-mul-div.cpp | 280
-rw-r--r--  clang/test/CIR/CodeGen/complex-unary.cpp | 86
-rw-r--r--  clang/test/CIR/CodeGen/empty.cpp | 32
-rw-r--r--  clang/test/CIR/CodeGen/variable-decomposition.cpp | 55
-rw-r--r--  clang/test/CXX/expr/expr.const/p2-0x.cpp | 5
-rw-r--r--  clang/test/CodeCompletion/skip-explicit-object-parameter.cpp | 50
-rw-r--r--  clang/test/CodeGen/attr-counted-by-for-pointers.c | 77
-rw-r--r--  clang/test/CodeGen/dbg-info-all-calls-described.c | 88
-rw-r--r--  clang/test/CodeGen/debug-info-abspath.c | 13
-rw-r--r--  clang/test/CodeGen/debug-info-compilation-dir.c | 7
-rw-r--r--  clang/test/CodeGen/debug-prefix-map.c | 5
-rw-r--r--  clang/test/CodeGenCXX/debug-info-function-context.cpp | 2
-rw-r--r--  clang/test/CodeGenCXX/difile_entry.cpp | 2
-rw-r--r--  clang/test/CodeGenHLSL/builtins/D3DCOLORtoUBYTE4.hlsl | 10
-rw-r--r--  clang/test/CodeGenObjC/exceptions.m | 19
-rw-r--r--  clang/test/CodeGenOpenCL/amdgpu-features.cl | 2
-rw-r--r--  clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl | 358
-rw-r--r--  clang/test/Driver/baremetal.cpp | 10
-rw-r--r--  clang/test/Driver/compilation-dir.c | 5
-rw-r--r--  clang/test/Driver/wasm-features.c | 6
-rw-r--r--  clang/test/Frontend/dump-minimization-hints.cpp | 32
-rw-r--r--  clang/test/Headers/__clang_hip_math.hip | 1496
-rw-r--r--  clang/test/Headers/__cpuidex_conflict.c | 15
-rw-r--r--  clang/test/Misc/target-invalid-cpu-note/nvptx.c | 4
-rw-r--r--  clang/test/PCH/debug-info-pch-container-path.c | 1
-rw-r--r--  clang/test/PCH/debug-info-pch-path.c | 3
-rw-r--r--  clang/test/Preprocessor/wasm-target-features.c | 12
-rw-r--r--  clang/test/Profile/coverage-prefix-map.c | 2
-rw-r--r--  clang/test/Sema/constexpr-void-cast.c | 7
-rw-r--r--  clang/test/Sema/warn-unreachable_crash.cpp | 41
-rw-r--r--  clang/test/SemaHLSL/BuiltIns/D3DCOLORtoUBYTE4-errors.hlsl | 2
-rw-r--r--  clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl | 40
-rw-r--r--  clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp | 53
-rw-r--r--  clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-module.cpp | 12
-rw-r--r--  clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-pch.cpp | 4
-rw-r--r--  clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name.cpp | 16
-rw-r--r--  clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp | 5
-rw-r--r--  clang/tools/clang-format/ClangFormat.cpp | 16
-rw-r--r--  clang/tools/clang-fuzzer/handle-cxx/handle_cxx.cpp | 6
-rw-r--r--  clang/tools/clang-import-test/clang-import-test.cpp | 2
-rw-r--r--  clang/tools/clang-installapi/ClangInstallAPI.cpp | 13
-rw-r--r--  clang/tools/clang-repl/CMakeLists.txt | 15
-rw-r--r--  clang/tools/diagtool/ShowEnabledWarnings.cpp | 4
-rw-r--r--  clang/tools/diagtool/TreeView.cpp | 2
-rw-r--r--  clang/tools/driver/cc1_main.cpp | 2
-rw-r--r--  clang/tools/driver/cc1as_main.cpp | 3
-rw-r--r--  clang/tools/driver/cc1gen_reproducer_main.cpp | 4
-rw-r--r--  clang/tools/driver/driver.cpp | 4
-rw-r--r--  clang/tools/libclang/CIndexCodeCompletion.cpp | 6
-rw-r--r--  clang/unittests/AST/ASTVectorTest.cpp | 5
-rw-r--r--  clang/unittests/AST/CommentLexer.cpp | 6
-rw-r--r--  clang/unittests/AST/CommentParser.cpp | 5
-rw-r--r--  clang/unittests/AST/CommentTextTest.cpp | 2
-rw-r--r--  clang/unittests/Analysis/MacroExpansionContextTest.cpp | 7
-rw-r--r--  clang/unittests/Analysis/UnsafeBufferUsageTest.cpp | 7
-rw-r--r--  clang/unittests/Basic/DiagnosticTest.cpp | 14
-rw-r--r--  clang/unittests/Basic/FileManagerTest.cpp | 12
-rw-r--r--  clang/unittests/Basic/SarifTest.cpp | 7
-rw-r--r--  clang/unittests/Basic/SourceManagerTest.cpp | 5
-rw-r--r--  clang/unittests/CodeGen/TestCompiler.h | 2
-rw-r--r--  clang/unittests/Driver/DXCModeTest.cpp | 20
-rw-r--r--  clang/unittests/Driver/SanitizerArgsTest.cpp | 5
-rw-r--r--  clang/unittests/Driver/SimpleDiagnosticConsumer.h | 9
-rw-r--r--  clang/unittests/Driver/ToolChainTest.cpp | 79
-rw-r--r--  clang/unittests/Frontend/ASTUnitTest.cpp | 3
-rw-r--r--  clang/unittests/Frontend/CodeGenActionTest.cpp | 36
-rw-r--r--  clang/unittests/Frontend/CompilerInstanceTest.cpp | 4
-rw-r--r--  clang/unittests/Frontend/PCHPreambleTest.cpp | 2
-rw-r--r--  clang/unittests/Frontend/ReparseWorkingDirTest.cpp | 4
-rw-r--r--  clang/unittests/Frontend/SearchPathTest.cpp | 4
-rw-r--r--  clang/unittests/Frontend/TextDiagnosticTest.cpp | 3
-rw-r--r--  clang/unittests/Frontend/UtilsTest.cpp | 2
-rw-r--r--  clang/unittests/Lex/HeaderSearchTest.cpp | 7
-rw-r--r--  clang/unittests/Lex/LexerTest.cpp | 5
-rw-r--r--  clang/unittests/Lex/ModuleDeclStateTest.cpp | 5
-rw-r--r--  clang/unittests/Lex/PPCallbacksTest.cpp | 5
-rw-r--r--  clang/unittests/Lex/PPConditionalDirectiveRecordTest.cpp | 5
-rw-r--r--  clang/unittests/Lex/PPDependencyDirectivesTest.cpp | 7
-rw-r--r--  clang/unittests/Lex/PPMemoryAllocationsTest.cpp | 5
-rw-r--r--  clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp | 5
-rw-r--r--  clang/unittests/Sema/SemaNoloadLookupTest.cpp | 2
-rw-r--r--  clang/unittests/Serialization/ForceCheckFileInputTest.cpp | 4
-rw-r--r--  clang/unittests/Serialization/LoadSpecLazilyTest.cpp | 2
-rw-r--r--  clang/unittests/Serialization/ModuleCacheTest.cpp | 8
-rw-r--r--  clang/unittests/Serialization/NoCommentsTest.cpp | 2
-rw-r--r--  clang/unittests/Serialization/PreambleInNamedModulesTest.cpp | 4
-rw-r--r--  clang/unittests/Serialization/VarDeclConstantInitTest.cpp | 2
-rw-r--r--  clang/unittests/Support/TimeProfilerTest.cpp | 3
-rw-r--r--  clang/unittests/Tooling/CompilationDatabaseTest.cpp | 3
-rw-r--r--  clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp | 12
-rw-r--r--  clang/unittests/Tooling/RefactoringTest.cpp | 9
-rw-r--r--  clang/unittests/Tooling/RewriterTestContext.h | 9
-rw-r--r--  clang/unittests/Tooling/Syntax/TokensTest.cpp | 7
-rw-r--r--  clang/unittests/Tooling/Syntax/TreeTestBase.cpp | 2
-rw-r--r--  clang/unittests/Tooling/Syntax/TreeTestBase.h | 5
-rw-r--r--  clang/unittests/Tooling/ToolingTest.cpp | 78
-rw-r--r--  compiler-rt/include/profile/MemProfData.inc | 40
-rw-r--r--  compiler-rt/lib/memprof/memprof_interface_internal.h | 10
-rw-r--r--  compiler-rt/lib/memprof/memprof_rawprofile.cpp | 32
-rw-r--r--  compiler-rt/lib/memprof/tests/CMakeLists.txt | 1
-rw-r--r--  compiler-rt/lib/memprof/tests/histogram_encoding.cpp | 35
-rw-r--r--  compiler-rt/lib/scudo/standalone/allocator_config.def | 3
-rw-r--r--  compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h | 12
-rw-r--r--  compiler-rt/lib/scudo/standalone/combined.h | 34
-rw-r--r--  compiler-rt/lib/scudo/standalone/secondary.h | 21
-rw-r--r--  compiler-rt/lib/scudo/standalone/tests/combined_test.cpp | 103
-rw-r--r--  compiler-rt/test/memprof/TestCases/memprof_histogram_uint8.cpp | 38
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/alignment-assumption.c | 2
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/icall.c | 4
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/implicit-integer-sign-change.c | 2
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation-or-sign-change.c | 2
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation.c | 2
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/implicit-unsigned-integer-truncation.c | 2
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/local_bounds.cpp | 8
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/nullptr-and-nonzero-offset.c | 4
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/override-callback.c | 6
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/recover-dedup-limit.cpp | 2
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/recover-dedup.cpp | 2
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/test-darwin-interface.c | 4
-rw-r--r--  compiler-rt/test/ubsan_minimal/TestCases/uadd-overflow.cpp | 4
-rw-r--r--  compiler-rt/test/ubsan_minimal/lit.common.cfg.py | 10
-rw-r--r--  cross-project-tests/CMakeLists.txt | 5
-rw-r--r--  cross-project-tests/dtlto/ld-archive-thin.test | 97
-rw-r--r--  cross-project-tests/lit.cfg.py | 2
-rw-r--r--  flang-rt/include/flang-rt/runtime/format.h | 8
-rw-r--r--  flang-rt/include/flang-rt/runtime/io-stmt.h | 2
-rw-r--r--  flang-rt/lib/runtime/edit-input.cpp | 34
-rw-r--r--  flang-rt/lib/runtime/io-stmt.cpp | 5
-rw-r--r--  flang-rt/lib/runtime/namelist.cpp | 3
-rw-r--r--  flang-rt/lib/runtime/unit.h | 3
-rw-r--r--  flang-rt/unittests/Runtime/CMakeLists.txt | 1
-rw-r--r--  flang-rt/unittests/Runtime/InputExtensions.cpp | 106
-rw-r--r--  flang/docs/Extensions.md | 5
-rw-r--r--  flang/docs/FortranStandardsSupport.md | 6
-rw-r--r--  flang/examples/FeatureList/FeatureList.cpp | 1
-rw-r--r--  flang/examples/FlangOmpReport/FlangOmpReportVisitor.cpp | 57
-rw-r--r--  flang/include/flang/Parser/dump-parse-tree.h | 5
-rw-r--r--  flang/include/flang/Parser/openmp-utils.h | 8
-rw-r--r--  flang/include/flang/Parser/parse-tree.h | 46
-rw-r--r--  flang/include/flang/Semantics/openmp-modifiers.h | 1
-rw-r--r--  flang/include/flang/Semantics/semantics.h | 1
-rw-r--r--  flang/lib/Frontend/CompilerInstance.cpp | 6
-rw-r--r--  flang/lib/Lower/OpenMP/ClauseProcessor.cpp | 10
-rw-r--r--  flang/lib/Lower/OpenMP/Clauses.cpp | 15
-rw-r--r--  flang/lib/Lower/OpenMP/DataSharingProcessor.cpp | 52
-rw-r--r--  flang/lib/Lower/OpenMP/OpenMP.cpp | 4
-rw-r--r--  flang/lib/Parser/openmp-parsers.cpp | 71
-rw-r--r--  flang/lib/Parser/unparse.cpp | 20
-rw-r--r--  flang/lib/Semantics/check-cuda.cpp | 9
-rw-r--r--  flang/lib/Semantics/check-declarations.cpp | 4
-rw-r--r--  flang/lib/Semantics/check-omp-structure.cpp | 41
-rw-r--r--  flang/lib/Semantics/expression.cpp | 77
-rw-r--r--  flang/lib/Semantics/openmp-modifiers.cpp | 16
-rw-r--r--  flang/lib/Semantics/pointer-assignment.cpp | 16
-rw-r--r--  flang/lib/Semantics/resolve-directives.cpp | 7
-rw-r--r--  flang/lib/Semantics/resolve-names.cpp | 3
-rw-r--r--  flang/test/Examples/omp-atomic.f90 | 6
-rw-r--r--  flang/test/Examples/omp-sections.f90 | 4
-rw-r--r--  flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 | 6
-rw-r--r--  flang/test/Parser/OpenMP/enter-automap-modifier.f90 | 16
-rw-r--r--  flang/test/Parser/OpenMP/sections.f90 | 144
-rw-r--r--  flang/test/Semantics/OpenACC/acc-default-none-function.f90 | 20
-rw-r--r--  flang/test/Semantics/assign02.f90 | 2
-rw-r--r--  flang/test/Semantics/bug1214.cuf | 49
-rw-r--r--  flang/test/Semantics/cuf11.cuf | 2
-rw-r--r--  flang/tools/flang-driver/driver.cpp | 5
-rw-r--r--  flang/tools/flang-driver/fc1_main.cpp | 5
-rw-r--r--  libc/cmake/modules/LLVMLibCCompileOptionRules.cmake | 4
-rw-r--r--  libc/config/baremetal/aarch64/entrypoints.txt | 1
-rw-r--r--  libc/config/baremetal/arm/entrypoints.txt | 1
-rw-r--r--  libc/config/baremetal/config.json | 5
-rw-r--r--  libc/config/baremetal/riscv/entrypoints.txt | 1
-rw-r--r--  libc/config/config.json | 6
-rw-r--r--  libc/config/gpu/amdgpu/config.json | 5
-rw-r--r--  libc/config/gpu/nvptx/config.json | 5
-rw-r--r--  libc/docs/configure.rst | 2
-rw-r--r--  libc/shared/math.h | 2
-rw-r--r--  libc/shared/math/atan2.h | 23
-rw-r--r--  libc/shared/math/atanf16.h | 28
-rw-r--r--  libc/src/__support/GPU/allocator.cpp | 72
-rw-r--r--  libc/src/__support/math/CMakeLists.txt | 35
-rw-r--r--  libc/src/__support/math/asin_utils.h | 2
-rw-r--r--  libc/src/__support/math/atan2.h | 209
-rw-r--r--  libc/src/__support/math/atanf16.h | 119
-rw-r--r--  libc/src/__support/threads/CMakeLists.txt | 8
-rw-r--r--  libc/src/__support/threads/gpu/CMakeLists.txt | 5
-rw-r--r--  libc/src/__support/threads/gpu/mutex.h | 32
-rw-r--r--  libc/src/__support/threads/mutex.h | 57
-rw-r--r--  libc/src/math/generic/CMakeLists.txt | 22
-rw-r--r--  libc/src/math/generic/atan2.cpp | 187
-rw-r--r--  libc/src/math/generic/atan2l.cpp | 4
-rw-r--r--  libc/src/math/generic/atanf16.cpp | 95
-rw-r--r--  libc/src/sched/linux/CMakeLists.txt | 16
-rw-r--r--  libc/src/sched/linux/sched_getaffinity.cpp | 4
-rw-r--r--  libc/src/sched/linux/sched_getcpucount.cpp | 3
-rw-r--r--  libc/src/sched/linux/sched_getscheduler.cpp | 1
-rw-r--r--  libc/src/sched/linux/sched_rr_get_interval.cpp | 2
-rw-r--r--  libc/src/sched/linux/sched_setaffinity.cpp | 4
-rw-r--r--  libc/src/sched/sched_getaffinity.h | 5
-rw-r--r--  libc/src/sched/sched_getcpucount.h | 3
-rw-r--r--  libc/src/sched/sched_getscheduler.h | 3
-rw-r--r--  libc/src/sched/sched_rr_get_interval.h | 4
-rw-r--r--  libc/src/sched/sched_setaffinity.h | 5
-rw-r--r--  libc/startup/baremetal/CMakeLists.txt | 58
-rw-r--r--  libc/startup/baremetal/arm/CMakeLists.txt | 16
-rw-r--r--  libc/startup/baremetal/arm/start.cpp | 92
-rw-r--r--  libc/startup/baremetal/fini.cpp | 8
-rw-r--r--  libc/startup/baremetal/fini.h | 16
-rw-r--r--  libc/startup/baremetal/init.cpp | 10
-rw-r--r--  libc/startup/baremetal/init.h | 18
-rw-r--r--  libc/test/shared/CMakeLists.txt | 2
-rw-r--r--  libc/test/shared/shared_math_test.cpp | 2
-rw-r--r--  libc/test/src/sched/CMakeLists.txt | 11
-rw-r--r--  libc/test/src/sched/affinity_test.cpp | 3
-rw-r--r--  libc/test/src/sched/cpu_count_test.cpp | 5
-rw-r--r--  libc/test/src/sched/get_priority_test.cpp | 2
-rw-r--r--  libc/test/src/sched/sched_rr_get_interval_test.cpp | 2
-rw-r--r--  libclc/CMakeLists.txt | 41
-rw-r--r--  libclc/cmake/modules/AddLibclc.cmake | 25
-rw-r--r--  libcxx/include/__assert | 4
-rw-r--r--  libcxx/include/__config | 18
-rw-r--r--  libcxx/include/__cxx03/__math/logarithms.h | 2
-rw-r--r--  libcxx/include/__hash_table | 72
-rw-r--r--  libcxx/include/__math/logarithms.h | 2
-rw-r--r--  libcxx/include/tuple | 70
-rw-r--r--  libcxx/test/support/check_assertion.h | 4
-rw-r--r--  libsycl/.clang-format | 4
-rw-r--r--  libsycl/.clang-tidy | 17
-rw-r--r--  libsycl/CMakeLists.txt | 126
-rw-r--r--  libsycl/LICENSE.txt | 278
-rw-r--r--  libsycl/README.md | 20
-rw-r--r--  libsycl/docs/index.rst | 79
-rw-r--r--  libsycl/include/CL/sycl.hpp | 30
-rw-r--r--  libsycl/include/sycl/__impl/detail/config.hpp | 59
-rw-r--r--  libsycl/include/sycl/__impl/platform.hpp | 31
-rw-r--r--  libsycl/include/sycl/sycl.hpp | 19
-rw-r--r--  libsycl/src/CMakeLists.txt | 98
-rw-r--r--  libsycl/src/ld-version-script.txt | 20
-rw-r--r--  libsycl/src/platform.cpp | 17
-rw-r--r--  libsycl/src/version.hpp.in | 16
-rw-r--r--  lld/COFF/Chunks.cpp | 22
-rw-r--r--  lld/COFF/Chunks.h | 32
-rw-r--r--  lld/COFF/Config.h | 3
-rw-r--r--  lld/COFF/Driver.cpp | 88
-rw-r--r--  lld/COFF/Driver.h | 2
-rw-r--r--  lld/COFF/DriverUtils.cpp | 16
-rw-r--r--  lld/COFF/MarkLive.cpp | 5
-rw-r--r--  lld/COFF/Options.td | 4
-rw-r--r--  lld/COFF/SymbolTable.cpp | 42
-rw-r--r--  lld/COFF/SymbolTable.h | 6
-rw-r--r--  lld/COFF/Symbols.cpp | 8
-rw-r--r--  lld/COFF/Symbols.h | 4
-rw-r--r--  lld/COFF/Writer.cpp | 29
-rw-r--r--  lld/ELF/Arch/LoongArch.cpp | 117
-rw-r--r--  lld/ELF/InputFiles.cpp | 59
-rw-r--r--  lld/test/COFF/arm64x-sameaddress.test | 107
-rw-r--r--  lld/test/ELF/dtlto/archive-thin.test | 65
-rw-r--r--  lld/test/ELF/loongarch-pc-hi20-lo12-got.s | 145
-rw-r--r--  lld/test/ELF/loongarch-relax-pc-hi20-lo12.s | 10
-rw-r--r--  lldb/bindings/interface/SBThreadExtensions.i | 4
-rw-r--r--  lldb/bindings/python/python-extensions.swig | 1
-rw-r--r--  lldb/docs/python_api_enums.rst | 2
-rw-r--r--  lldb/docs/resources/lldbgdbremote.md | 83
-rw-r--r--  lldb/docs/use/formatting.rst | 6
-rw-r--r--  lldb/include/lldb/Core/Module.h | 3
-rw-r--r--  lldb/include/lldb/Core/ModuleList.h | 8
-rw-r--r--  lldb/include/lldb/Expression/Expression.h | 57
-rw-r--r--  lldb/include/lldb/Symbol/SymbolFile.h | 13
-rw-r--r--  lldb/include/lldb/Symbol/TypeSystem.h | 3
-rw-r--r--  lldb/include/lldb/Target/Process.h | 1
-rw-r--r--  lldb/include/lldb/lldb-enumerations.h | 15
-rw-r--r--  lldb/packages/Python/lldbsuite/support/temp_file.py | 23
-rw-r--r--  lldb/packages/Python/lldbsuite/test/builders/__init__.py | 10
-rw-r--r--  lldb/packages/Python/lldbsuite/test/builders/builder.py | 5
-rw-r--r--  lldb/packages/Python/lldbsuite/test/configuration.py | 4
-rw-r--r--  lldb/packages/Python/lldbsuite/test/decorators.py | 143
-rw-r--r--  lldb/packages/Python/lldbsuite/test/dotest.py | 14
-rw-r--r--  lldb/packages/Python/lldbsuite/test/dotest_args.py | 8
-rw-r--r--  lldb/packages/Python/lldbsuite/test/lldbtest.py | 19
-rw-r--r--  lldb/packages/Python/lldbsuite/test/make/Makefile.rules | 10
-rwxr-xr-x  lldb/scripts/framework-header-fix.py | 2
-rw-r--r--  lldb/source/Commands/CommandObjectDWIMPrint.cpp | 47
-rw-r--r--  lldb/source/Commands/CommandObjectMemory.cpp | 2
-rw-r--r--  lldb/source/Core/DumpDataExtractor.cpp | 16
-rw-r--r--  lldb/source/Core/Module.cpp | 9
-rw-r--r--  lldb/source/Core/ModuleList.cpp | 14
-rw-r--r--  lldb/source/DataFormatters/FormatManager.cpp | 1
-rw-r--r--  lldb/source/DataFormatters/VectorType.cpp | 2
-rw-r--r--  lldb/source/Expression/Expression.cpp | 49
-rw-r--r--  lldb/source/Expression/IRExecutionUnit.cpp | 65
-rw-r--r--  lldb/source/Expression/IRInterpreter.cpp | 4
-rw-r--r--  lldb/source/Expression/Materializer.cpp | 21
-rw-r--r--  lldb/source/Host/common/Host.cpp | 9
-rw-r--r--  lldb/source/Host/windows/Host.cpp | 63
-rw-r--r--  lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp | 2
-rw-r--r--  lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp | 2
-rw-r--r--  lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp | 3
-rw-r--r--  lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp | 2
-rw-r--r--  lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp | 6
-rw-r--r--  lldb/source/Plugins/Language/ClangCommon/ClangHighlighter.cpp | 3
-rw-r--r--  lldb/source/Plugins/Process/wasm/CMakeLists.txt | 1
-rw-r--r--  lldb/source/Plugins/Process/wasm/ProcessWasm.cpp | 33
-rw-r--r--  lldb/source/Plugins/Process/wasm/ProcessWasm.h | 8
-rw-r--r--  lldb/source/Plugins/Process/wasm/RegisterContextWasm.cpp | 109
-rw-r--r--  lldb/source/Plugins/Process/wasm/RegisterContextWasm.h | 69
-rw-r--r--  lldb/source/Plugins/Process/wasm/ThreadWasm.cpp | 17
-rw-r--r--  lldb/source/Plugins/Process/wasm/ThreadWasm.h | 3
-rw-r--r--  lldb/source/Plugins/SymbolFile/CTF/SymbolFileCTF.cpp | 29
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.cpp | 10
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.h | 13
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp | 62
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp | 13
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h | 31
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp | 19
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h | 13
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp | 51
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h | 13
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp | 56
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h | 3
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp | 11
-rw-r--r--  lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h | 3
-rw-r--r--  lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp | 6
-rw-r--r--  lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp | 5
-rw-r--r--  lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp | 5
-rw-r--r--  lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp | 78
-rw-r--r--  lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h | 7
-rw-r--r--  lldb/source/Target/Process.cpp | 12
-rw-r--r--  lldb/source/ValueObject/ValueObject.cpp | 5
-rw-r--r--  lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py | 10
-rw-r--r--  lldb/test/API/commands/watchpoints/watchpoint_count/TestWatchpointCount.py | 4
-rw-r--r--  lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py | 6
-rw-r--r--  lldb/test/API/functionalities/gdb_remote_client/TestWasm.py | 225
-rw-r--r--  lldb/test/API/functionalities/gdb_remote_client/simple.c | 10
-rw-r--r--  lldb/test/API/functionalities/gdb_remote_client/simple.yaml | 228
-rw-r--r--  lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py | 2
-rw-r--r--  lldb/test/API/functionalities/postmortem/elf-core/expr/TestExpr.py | 4
-rw-r--r--  lldb/test/API/functionalities/postmortem/minidump-new/TestMiniDumpNew.py | 14
-rw-r--r--  lldb/test/API/functionalities/postmortem/minidump/TestMiniDump.py | 2
-rw-r--r--  lldb/test/API/functionalities/scripted_process/stack_core_scripted_process.py | 4
-rw-r--r--  lldb/test/API/functionalities/step_scripted/TestStepScripted.py | 4
-rw-r--r--  lldb/test/API/functionalities/tail_call_frames/cross_dso/TestCrossDSOTailCalls.py | 1
-rw-r--r--  lldb/test/API/functionalities/tail_call_frames/cross_object/TestCrossObjectTailCalls.py | 1
-rw-r--r--  lldb/test/API/functionalities/tsan/multiple/TestTsanMultiple.py | 2
-rw-r--r--  lldb/test/API/lang/cpp/expr-definition-in-dylib/Makefile | 6
-rw-r--r--  lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py | 33
-rw-r--r--  lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp | 3
-rw-r--r--  lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h | 8
-rw-r--r--  lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp | 6
-rw-r--r--  lldb/test/API/macosx/abort_with_payload/TestAbortWithPayload.py | 2
-rw-r--r--  lldb/test/API/macosx/corefile-exception-reason/TestCorefileExceptionReason.py | 2
-rw-r--r--  lldb/test/API/riscv/break-undecoded/TestBreakpointIllegal.py | 4
-rw-r--r--  lldb/test/Shell/Recognizer/Inputs/ubsan_add_overflow.c | 3
-rw-r--r--  lldb/test/Shell/Recognizer/ubsan_add_overflow.test | 22
-rw-r--r--  lldb/test/Shell/Scripts/TestFrameworkFixScript.test | 2
-rw-r--r--  lldb/test/Shell/Scripts/TestFrameworkFixUnifdef.test | 2
-rw-r--r--  lldb/test/Shell/Scripts/TestRPCFrameworkFixScript.test | 2
-rw-r--r--  lldb/tools/lldb-rpc/LLDBRPCHeaders.cmake | 2
-rw-r--r--  lldb/unittests/Core/DumpDataExtractorTest.cpp | 6
-rw-r--r--  lldb/unittests/Expression/CMakeLists.txt | 1
-rw-r--r--  lldb/unittests/Expression/ExpressionTest.cpp | 122
-rw-r--r--  lldb/unittests/Host/FileSystemTest.cpp | 2
-rw-r--r--  lldb/unittests/Symbol/TestTypeSystemClang.cpp | 142
-rw-r--r--  llvm/CMakeLists.txt | 2
-rw-r--r--  llvm/docs/CommandGuide/lit.rst | 5
-rw-r--r--  llvm/docs/HowToCrossCompileBuiltinsOnArm.rst | 227
-rw-r--r--  llvm/docs/ReleaseNotes.md | 3
-rw-r--r--  llvm/docs/SPIRVUsage.rst | 30
-rw-r--r--  llvm/include/llvm/ADT/Any.h | 1
-rw-r--r--  llvm/include/llvm/Analysis/DXILResource.h | 82
-rw-r--r--  llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h | 4
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfo.h | 4
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfoImpl.h | 2
-rw-r--r--  llvm/include/llvm/Analysis/VectorUtils.h | 3
-rw-r--r--  llvm/include/llvm/BinaryFormat/ELF.h | 3
-rw-r--r--  llvm/include/llvm/Bitstream/BitstreamWriter.h | 5
-rw-r--r--  llvm/include/llvm/CodeGen/AsmPrinter.h | 39
-rw-r--r--  llvm/include/llvm/CodeGen/MachineBasicBlock.h | 9
-rw-r--r--  llvm/include/llvm/CodeGen/MachineFunction.h | 10
-rw-r--r--  llvm/include/llvm/CodeGen/MachineInstrBuilder.h | 92
-rw-r--r--  llvm/include/llvm/CodeGen/MachineScheduler.h | 8
-rw-r--r--  llvm/include/llvm/CodeGen/SDPatternMatch.h | 12
-rw-r--r--  llvm/include/llvm/CodeGen/ScheduleDAG.h | 5
-rw-r--r--  llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 2
-rw-r--r--  llvm/include/llvm/Frontend/HLSL/HLSLBinding.h | 162
-rw-r--r--  llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h | 115
-rw-r--r--  llvm/include/llvm/Frontend/OpenMP/ClauseT.h | 5
-rw-r--r--  llvm/include/llvm/Frontend/OpenMP/OMP.td | 2
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 67
-rw-r--r--  llvm/include/llvm/LTO/LTO.h | 6
-rw-r--r--  llvm/include/llvm/MC/MCObjectFileInfo.h | 5
-rw-r--r--  llvm/include/llvm/MC/MCObjectStreamer.h | 11
-rw-r--r--  llvm/include/llvm/MC/MCSection.h | 40
-rw-r--r--  llvm/include/llvm/ObjectYAML/ELFYAML.h | 2
-rw-r--r--  llvm/include/llvm/ProfileData/MemProfData.inc | 40
-rw-r--r--  llvm/include/llvm/Support/VirtualFileSystem.h | 2
-rw-r--r--  llvm/include/llvm/Transforms/IPO/Attributor.h | 2
-rw-r--r--  llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h | 25
-rw-r--r--  llvm/lib/Analysis/CMakeLists.txt | 1
-rw-r--r--  llvm/lib/Analysis/ConstantFolding.cpp | 7
-rw-r--r--  llvm/lib/Analysis/DXILResource.cpp | 136
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp | 14
-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp | 4
-rw-r--r--  llvm/lib/Analysis/VectorUtils.cpp | 12
-rw-r--r--  llvm/lib/CGData/StableFunctionMapRecord.cpp | 16
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 108
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 23
-rw-r--r--  llvm/lib/CodeGen/MachineFunction.cpp | 20
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp | 66
-rw-r--r--  llvm/lib/CodeGen/RegAllocBase.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 84
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 3
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/TailDuplicator.cpp | 18
-rw-r--r--  llvm/lib/CodeGen/TargetInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp | 119
-rw-r--r--  llvm/lib/Frontend/HLSL/CMakeLists.txt | 1
-rw-r--r--  llvm/lib/Frontend/HLSL/HLSLBinding.cpp | 142
-rw-r--r--  llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp | 464
-rw-r--r--  llvm/lib/IR/DebugInfoMetadata.cpp | 2
-rw-r--r--  llvm/lib/LTO/LTO.cpp | 43
-rw-r--r--  llvm/lib/MC/MCObjectFileInfo.cpp | 20
-rw-r--r--  llvm/lib/MC/MCObjectStreamer.cpp | 119
-rw-r--r--  llvm/lib/MC/MCWin64EH.cpp | 3
-rw-r--r--  llvm/lib/MC/MCWinCOFFStreamer.cpp | 5
-rw-r--r--  llvm/lib/Object/ELFObjectFile.cpp | 9
-rw-r--r--  llvm/lib/ObjectYAML/ELFEmitter.cpp | 6
-rw-r--r--  llvm/lib/ObjectYAML/ELFYAML.cpp | 2
-rw-r--r--  llvm/lib/ProfileData/MemProfReader.cpp | 71
-rw-r--r--  llvm/lib/Remarks/RemarkLinker.cpp | 4
-rw-r--r--  llvm/lib/Support/BLAKE3/CMakeLists.txt | 3
-rw-r--r--  llvm/lib/Support/FileCollector.cpp | 3
-rw-r--r--  llvm/lib/Support/VirtualFileSystem.cpp | 11
-rw-r--r--  llvm/lib/Support/Windows/Threading.inc | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Combine.td | 10
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 12
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp | 20
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp | 4
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp | 47
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp | 90
-rw-r--r--  llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPU.td | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 23
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp | 13
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructions.td | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp | 13
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 17
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 45
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp | 74
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSchedStrategy.h | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.cpp | 37
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.h | 6
-rw-r--r--  llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp | 13
-rw-r--r--  llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIDefines.h | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/SIFoldOperands.cpp | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 91
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.td | 14
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.td | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp | 39
-rw-r--r--  llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h | 7
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP2Instructions.td | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP3Instructions.td | 230
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPInstructions.td | 47
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 5
-rw-r--r--  llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/CSKY/MCTargetDesc/CSKYAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/DirectX/DXILRootSignature.cpp | 26
-rw-r--r--  llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/MSP430/MCTargetDesc/MSP430AsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp | 6
-rw-r--r--  llvm/lib/Target/Mips/MipsISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp | 10
-rw-r--r--  llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h | 1
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp | 5
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp | 85
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 12
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrFormats.td | 10
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp | 26
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.td | 634
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 624
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td | 8
-rw-r--r--  llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp | 24
-rw-r--r--  llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 5
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 13
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 15
-rw-r--r--  llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 24
-rw-r--r--  llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp | 17
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp | 179
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp | 16
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h | 2
-rw-r--r--  llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 2
-rw-r--r--  llvm/lib/Target/VE/MCTargetDesc/VEAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssembly.td | 9
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td | 6
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h | 2
-rw-r--r--  llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp | 8
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 18
-rw-r--r--  llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 4
-rw-r--r--  llvm/lib/TargetParser/Host.cpp | 13
-rw-r--r--  llvm/lib/TargetParser/TargetParser.cpp | 1
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp | 102
-rw-r--r--  llvm/lib/Transforms/IPO/AttributorAttributes.cpp | 5
-rw-r--r--  llvm/lib/Transforms/IPO/LowerTypeTests.cpp | 127
-rw-r--r--  llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp | 31
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 8
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 7
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 37
-rw-r--r--  llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp | 30
-rw-r--r--  llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp | 7
-rw-r--r--  llvm/lib/Transforms/Scalar/LICM.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp | 133
-rw-r--r--  llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 71
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp | 28
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h | 5
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 81
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Vectorize/VectorCombine.cpp | 7
-rw-r--r--  llvm/test/Analysis/CostModel/RISCV/fround.ll | 24
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/zext-add.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll | 110
-rw-r--r--  llvm/test/CodeGen/AArch64/adc.ll | 49
-rw-r--r--  llvm/test/CodeGen/AArch64/addcarry-crash.ll | 34
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-vabs.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll | 36
-rw-r--r--  llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll | 32
-rw-r--r--  llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll | 19
-rw-r--r--  llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/cmp-to-cmn.ll | 437
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll | 55
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll | 52
-rw-r--r--  llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll | 91
-rw-r--r--  llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll | 77
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-dotreduce.ll | 4360
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-extmul.ll | 108
-rw-r--r--  llvm/test/CodeGen/AArch64/peephole-and-tst.ll | 275
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-vector-interleave.ll | 103
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-vscale-combine.ll | 105
-rw-r--r--  llvm/test/CodeGen/AArch64/tbl-loops.ll | 3
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-add.ll | 337
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir | 1254
-rw-r--r--  llvm/test/CodeGen/AMDGPU/add-max.ll | 62
-rw-r--r--  llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll | 28
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fdiv.f16.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll | 1783
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fptrunc.ll | 633
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll | 5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll | 98
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll | 184
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll | 539
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll | 64
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll | 164
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll | 72
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll | 66
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp.ll | 66
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp10.ll | 72
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log.ll | 115
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log10.ll | 115
-rw-r--r--  llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memmove-var-size.ll | 36
-rw-r--r--  llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll | 34
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rcp-pattern.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rsq.f32.ll | 90
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rsq.f64.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udivrem24.ll | 1984
-rw-r--r--  llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll | 29
-rw-r--r--  llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll | 32
-rw-r--r--  llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll | 19
-rw-r--r--  llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll | 20
-rw-r--r--  llvm/test/CodeGen/ARM/nop_concat_vectors.ll | 8
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll | 12
-rw-r--r--  llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll | 91
-rw-r--r--  llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll | 32
-rw-r--r--  llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll | 19
-rw-r--r--  llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll | 20
-rw-r--r--  llvm/test/CodeGen/NVPTX/fold-movs.ll | 38
-rw-r--r--  llvm/test/CodeGen/NVPTX/i8x4-instructions.ll | 168
-rw-r--r--  llvm/test/CodeGen/NVPTX/ld-param-sink.ll | 47
-rw-r--r--  llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll | 347
-rw-r--r--  llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll | 5
-rw-r--r--  llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll | 33
-rw-r--r--  llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll | 20
-rw-r--r--  llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll | 21
-rw-r--r--  llvm/test/CodeGen/RISCV/memset-inline.ll | 122
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll | 6
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/memset-inline.ll | 126
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll | 46
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll | 144
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll | 11
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir | 16
-rw-r--r--  llvm/test/CodeGen/RISCV/zilsd.ll | 19
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll | 42
-rw-r--r--  llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll | 4
-rw-r--r--  llvm/test/CodeGen/SPIRV/logical-struct-access.ll | 3
-rw-r--r--  llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll | 46
-rw-r--r--  llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll | 54
-rw-r--r--  llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll | 75
-rw-r--r--  llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll | 104
-rw-r--r--  llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll | 19
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-mul-07.ll | 114
-rw-r--r--  llvm/test/CodeGen/WebAssembly/target-features-cpus.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/apx/cf.ll | 35
-rw-r--r--  llvm/test/CodeGen/X86/call-graph-section-assembly.ll | 43
-rw-r--r--  llvm/test/CodeGen/X86/call-graph-section-tailcall.ll | 34
-rw-r--r--  llvm/test/CodeGen/X86/call-graph-section.ll | 38
-rw-r--r--  llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll | 32
-rw-r--r--  llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll | 19
-rw-r--r--  llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll | 20
-rw-r--r--  llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir (renamed from llvm/test/CodeGen/X86/tail-dup-computed-goto.mir) | 44
-rw-r--r--  llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll | 16
-rw-r--r--  llvm/test/Instrumentation/HWAddressSanitizer/globals.ll | 24
-rw-r--r--  llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s | 15
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s | 505
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s | 505
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s | 308
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s | 308
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s | 220
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s | 220
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s | 75
-rw-r--r--  llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s | 20
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt | 542
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt | 249
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt | 183
-rw-r--r--  llvm/test/MC/ELF/many-instructions.s | 10
-rw-r--r--  llvm/test/MC/X86/verify-callgraph-section.s | 58
-rw-r--r--  llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll | 14
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll | 222
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll | 2
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll | 10
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/zext-nuw.ll | 6
-rw-r--r--  llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll | 24
-rw-r--r--  llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll | 182
-rw-r--r--  llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll | 8
-rw-r--r--  llvm/test/Transforms/InstCombine/gepofconstgepi8.ll | 180
-rw-r--r--  llvm/test/Transforms/InstCombine/getelementptr.ll | 96
-rw-r--r--  llvm/test/Transforms/InstCombine/icmp-custom-dl.ll | 10
-rw-r--r--  llvm/test/Transforms/InstCombine/icmp-gep.ll | 7
-rw-r--r--  llvm/test/Transforms/InstCombine/indexed-gep-compares.ll | 2
-rw-r--r--  llvm/test/Transforms/InstCombine/known-phi-recurse.ll | 5
-rw-r--r--  llvm/test/Transforms/InstCombine/load-cmp.ll | 5
-rw-r--r--  llvm/test/Transforms/InstCombine/phi.ll | 55
-rw-r--r--  llvm/test/Transforms/InstCombine/pr39908.ll | 4
-rw-r--r--  llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll | 24
-rw-r--r--  llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll | 36
-rw-r--r--  llvm/test/Transforms/InstCombine/sub-gep.ll | 2
-rw-r--r--  llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll | 48
-rw-r--r--  llvm/test/Transforms/LICM/gep-reassociate.ll | 53
-rw-r--r--  llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll | 5
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopUnroll/AArch64/vector.ll | 194
-rw-r--r--  llvm/test/Transforms/LoopUnroll/RISCV/vector.ll | 603
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll | 62
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll | 88
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/f16.ll | 45
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll | 14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll | 59
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll | 1481
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll | 729
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll | 18
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll | 634
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll | 54
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll | 27
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll | 33
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll | 32
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll | 328
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll | 9
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll | 42
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll | 9
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll | 9
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll | 25
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll | 27
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll | 33
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll | 60
-rw-r--r--  llvm/test/Transforms/LoopVectorize/intrinsic.ll | 201
-rw-r--r--  llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll | 38
-rw-r--r--  llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll | 40
-rw-r--r--  llvm/test/Transforms/LoopVectorize/reduction-inloop.ll | 54
-rw-r--r--  llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll | 21
-rw-r--r--  llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml | 8
-rw-r--r--  llvm/test/Transforms/LowerTypeTests/export-alias.ll | 20
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll | 2
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll | 4
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll | 42
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll | 279
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll | 280
-rw-r--r--  llvm/test/Transforms/Scalarizer/intrinsics.ll | 47
-rw-r--r--  llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll | 6
-rw-r--r--  llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll | 6
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll | 195
-rw-r--r--  llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll | 95
-rw-r--r--  llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll | 13
-rw-r--r--  llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg | 2
-rw-r--r--  llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll | 889
-rw-r--r--  llvm/test/lit.cfg.py | 13
-rw-r--r--  llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test | 778
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe | bin 1611256 -> 1666656 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw | bin 75792 -> 20256 bytes
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe | bin 1604896 -> 1660336 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw | bin 1152 -> 1152 bytes
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe | bin 0 -> 1604896 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw | bin 0 -> 1152 bytes
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe | bin 1604904 -> 1660336 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw | bin 1152 -> 1152 bytes
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe | bin 1605480 -> 1660912 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw | bin 976 -> 976 bytes
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe | bin 1604912 -> 1660352 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw | bin 1920 -> 1920 bytes
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe | bin 1606576 -> 1661960 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw | bin 74952 -> 19608 bytes
-rwxr-xr-x  llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe | bin 1607856 -> 1663288 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw | bin 1152 -> 1152 bytes
-rw-r--r--  llvm/test/tools/llvm-profdata/memprof-basic-histogram.test | 4
-rw-r--r--  llvm/test/tools/llvm-profdata/memprof-basic.test | 4
-rw-r--r--  llvm/test/tools/llvm-profdata/memprof-basic_v4.test | 102
-rw-r--r--  llvm/test/tools/llvm-profdata/memprof-inline.test | 2
-rw-r--r--  llvm/test/tools/llvm-profdata/memprof-multi.test | 2
-rw-r--r--  llvm/test/tools/llvm-profdata/memprof-padding-histogram.test | 154
-rw-r--r--  llvm/test/tools/llvm-profdata/memprof-pic.test | 4
-rw-r--r--  llvm/test/tools/obj2yaml/ELF/eflags.yaml | 31
-rw-r--r--  llvm/test/tools/yaml2obj/file-header-flags.yaml | 25
-rw-r--r--  llvm/tools/llvm-exegesis/lib/X86/Target.cpp | 16
-rw-r--r--  llvm/tools/llvm-readobj/ELFDumper.cpp | 10
-rw-r--r--  llvm/tools/obj2yaml/elf2yaml.cpp | 3
-rw-r--r--  llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt | 2
-rw-r--r--  llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt | 2
-rw-r--r--  llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp | 32
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp | 2
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp | 6
-rw-r--r--  llvm/unittests/Frontend/CMakeLists.txt | 1
-rw-r--r--  llvm/unittests/Frontend/HLSLBindingTest.cpp | 275
-rw-r--r--  llvm/unittests/Support/VirtualFileSystemTest.cpp | 220
-rw-r--r--  llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp | 171
-rw-r--r--  llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp | 38
-rw-r--r--  llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn | 1
-rw-r--r--  llvm/utils/lit/lit/Test.py | 3
-rw-r--r--  llvm/utils/lit/lit/TestRunner.py | 2
-rw-r--r--  llvm/utils/lit/lit/cl_arguments.py | 10
-rwxr-xr-x  llvm/utils/lit/lit/main.py | 2
-rw-r--r--  llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt | 2
-rw-r--r--  llvm/utils/lit/tests/xfail-cl.py | 28
-rw-r--r--  mlir/include/mlir/Conversion/Passes.td | 22
-rw-r--r--  mlir/include/mlir/Dialect/Async/IR/AsyncOps.td | 3
-rw-r--r--  mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td | 3
-rw-r--r--  mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td | 47
-rw-r--r--  mlir/include/mlir/Dialect/SCF/IR/SCFOps.td | 15
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td | 4
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td | 26
-rw-r--r--  mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h | 4
-rw-r--r--  mlir/include/mlir/Dialect/Vector/IR/VectorOps.td | 6
-rw-r--r--  mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td | 154
-rw-r--r--  mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td | 1
-rw-r--r--  mlir/include/mlir/IR/Operation.h | 20
-rw-r--r--  mlir/include/mlir/Target/LLVMIR/ModuleImport.h | 4
-rw-r--r--  mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp | 38
-rw-r--r--  mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRVPass.cpp | 1
-rw-r--r--  mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRVPass.cpp | 1
-rw-r--r--  mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp | 8
-rw-r--r--  mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp | 34
-rw-r--r--  mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp | 7
-rw-r--r--  mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRVPass.cpp | 1
-rw-r--r--  mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp | 21
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp | 15
-rw-r--r--  mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp | 71
-rw-r--r--  mlir/lib/Dialect/SCF/IR/SCF.cpp | 8
-rw-r--r--  mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp | 54
-rw-r--r--  mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp | 48
-rw-r--r--  mlir/lib/Dialect/Vector/IR/VectorOps.cpp | 31
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp | 24
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp | 2
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp | 2
-rw-r--r--  mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp | 61
-rw-r--r--  mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp | 103
-rw-r--r--  mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp | 9
-rw-r--r--  mlir/lib/Parser/Parser.cpp | 30
-rw-r--r--  mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp | 36
-rw-r--r--  mlir/lib/Target/LLVMIR/ModuleImport.cpp | 14
-rw-r--r--  mlir/lib/Target/LLVMIR/ModuleTranslation.cpp | 19
-rw-r--r--mlir/lib/Tools/mlir-opt/MlirOptMain.cpp3
-rw-r--r--mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir17
-rw-r--r--mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir54
-rw-r--r--mlir/test/Conversion/GPUToSPIRV/lookup-target-env.mlir40
-rw-r--r--mlir/test/Conversion/MathToSPIRV/math-to-fpclassify-spirv.mlir27
-rw-r--r--mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir24
-rw-r--r--mlir/test/Dialect/Async/canonicalize.mlir10
-rw-r--r--mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir55
-rw-r--r--mlir/test/Dialect/SCF/canonicalize.mlir34
-rw-r--r--mlir/test/Dialect/SPIRV/IR/logical-ops.mlir18
-rw-r--r--mlir/test/Dialect/Tosa/canonicalize.mlir20
-rw-r--r--mlir/test/Dialect/Vector/canonicalize.mlir34
-rw-r--r--mlir/test/Dialect/Vector/int-range-interface.mlir17
-rw-r--r--mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir16
-rw-r--r--mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir24
-rw-r--r--mlir/test/Dialect/XeGPU/invalid.mlir68
-rw-r--r--mlir/test/Dialect/XeGPU/ops.mlir29
-rw-r--r--mlir/test/IR/top-level.mlir4
-rw-r--r--mlir/test/Target/LLVMIR/Import/module-asm.ll5
-rw-r--r--mlir/test/Target/LLVMIR/invalid-module.mlir12
-rw-r--r--mlir/test/Target/LLVMIR/module-asm.mlir6
-rw-r--r--mlir/test/Target/LLVMIR/nvvmir-invalid.mlir39
-rw-r--r--mlir/test/Target/LLVMIR/nvvmir.mlir23
-rw-r--r--mlir/test/Target/SPIRV/logical-ops.mlir2
-rw-r--r--mlir/test/mlir-tblgen/op-properties-predicates.td6
-rw-r--r--mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp3
-rw-r--r--runtimes/CMakeLists.txt2
-rw-r--r--third-party/benchmark/src/cycleclock.h4
-rw-r--r--utils/bazel/llvm-project-overlay/libc/BUILD.bazel36
-rw-r--r--utils/bazel/llvm-project-overlay/libc/libc_configure_options.bzl3
-rw-r--r--utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel2
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel1
1025 files changed, 35445 insertions, 14363 deletions
diff --git a/bolt/test/binary-analysis/AArch64/cmdline-args.test b/bolt/test/binary-analysis/AArch64/cmdline-args.test
index 76f7c3b..3e70b2c 100644
--- a/bolt/test/binary-analysis/AArch64/cmdline-args.test
+++ b/bolt/test/binary-analysis/AArch64/cmdline-args.test
@@ -4,14 +4,15 @@
# Verify that an error message is provided if an input file is missing or incorrect
RUN: not llvm-bolt-binary-analysis 2>&1 | FileCheck -check-prefix=NOFILEARG %s
-NOFILEARG: llvm-bolt-binary-analysis: Not enough positional command line arguments specified!
-NOFILEARG-NEXT: Must specify at least 1 positional argument: See: {{.*}}llvm-bolt-binary-analysis --help
+NOFILEARG: llvm-bolt-binary-analysis{{(\.exe)?}}: Not enough positional command line arguments specified!
+NOFILEARG-NEXT: Must specify at least 1 positional argument: See: {{.*}}llvm-bolt-binary-analysis{{(\.exe)?}} --help
RUN: not llvm-bolt-binary-analysis non-existing-file 2>&1 | FileCheck -check-prefix=NONEXISTINGFILEARG %s
-NONEXISTINGFILEARG: llvm-bolt-binary-analysis: 'non-existing-file': No such file or directory.
+# Don't check the OS-dependent message "No such file or directory".
+NONEXISTINGFILEARG: llvm-bolt-binary-analysis{{(\.exe)?}}: 'non-existing-file': {{.*}}
RUN: not llvm-bolt-binary-analysis %p/Inputs/dummy.txt 2>&1 | FileCheck -check-prefix=NOELFFILEARG %s
-NOELFFILEARG: llvm-bolt-binary-analysis: '{{.*}}/Inputs/dummy.txt': The file was not recognized as a valid object file.
+NOELFFILEARG: llvm-bolt-binary-analysis{{(\.exe)?}}: '{{.*}}/Inputs/dummy.txt': The file was not recognized as a valid object file.
RUN: %clang %cflags -Wl,--emit-relocs %p/../../Inputs/asm_foo.s %p/../../Inputs/asm_main.c -o %t.exe
RUN: llvm-bolt-binary-analysis %t.exe 2>&1 | FileCheck -check-prefix=VALIDELFFILEARG --allow-empty %s
@@ -26,7 +27,7 @@ RUN: llvm-bolt-binary-analysis --help 2>&1 | FileCheck -check-prefix=HELP %s
HELP: OVERVIEW: BinaryAnalysis
HELP-EMPTY:
-HELP-NEXT: USAGE: llvm-bolt-binary-analysis [options] <executable>
+HELP-NEXT: USAGE: llvm-bolt-binary-analysis{{(\.exe)?}} [options] <executable>
HELP-EMPTY:
HELP-NEXT: OPTIONS:
HELP-EMPTY:
diff --git a/bolt/test/lsda-section-name.cpp b/bolt/test/lsda-section-name.cpp
index 929b17f..e91d0ac 100644
--- a/bolt/test/lsda-section-name.cpp
+++ b/bolt/test/lsda-section-name.cpp
@@ -1,8 +1,8 @@
// This test checks that an LSDA section named .gcc_except_table.main is
// disassembled by BOLT.
-// RUN: %clang++ %cxxflags -O3 -no-pie -c %s -o %t.o
-// RUN: %clang++ %cxxflags -O3 -no-pie -fuse-ld=lld %t.o -o %t
+// RUN: %clangxx %cxxflags -O3 -no-pie -c %s -o %t.o
+// RUN: %clangxx %cxxflags -O3 -no-pie -fuse-ld=lld %t.o -o %t
// RUN: llvm-objcopy --rename-section .gcc_except_table=.gcc_except_table.main %t
// RUN: llvm-readelf -SW %t | FileCheck %s
// RUN: llvm-bolt %t -o %t.bolt
diff --git a/clang-tools-extra/clang-apply-replacements/tool/ClangApplyReplacementsMain.cpp b/clang-tools-extra/clang-apply-replacements/tool/ClangApplyReplacementsMain.cpp
index 062e236..76de8bd 100644
--- a/clang-tools-extra/clang-apply-replacements/tool/ClangApplyReplacementsMain.cpp
+++ b/clang-tools-extra/clang-apply-replacements/tool/ClangApplyReplacementsMain.cpp
@@ -97,8 +97,7 @@ int main(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv);
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), DiagOpts);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts);
// Determine a formatting style from options.
auto FormatStyleOrError = format::getStyle(FormatStyleOpt, FormatStyleConfig,
diff --git a/clang-tools-extra/clang-change-namespace/tool/ClangChangeNamespace.cpp b/clang-tools-extra/clang-change-namespace/tool/ClangChangeNamespace.cpp
index 2a8fe2d..2efdd92 100644
--- a/clang-tools-extra/clang-change-namespace/tool/ClangChangeNamespace.cpp
+++ b/clang-tools-extra/clang-change-namespace/tool/ClangChangeNamespace.cpp
@@ -128,9 +128,8 @@ int main(int argc, const char **argv) {
LangOptions DefaultLangOptions;
DiagnosticOptions DiagOpts;
clang::TextDiagnosticPrinter DiagnosticPrinter(errs(), DiagOpts);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), DiagOpts,
- &DiagnosticPrinter, false);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts,
+ &DiagnosticPrinter, false);
auto &FileMgr = Tool.getFiles();
SourceManager Sources(Diagnostics, FileMgr);
Rewriter Rewrite(Sources, DefaultLangOptions);
diff --git a/clang-tools-extra/clang-include-fixer/tool/ClangIncludeFixer.cpp b/clang-tools-extra/clang-include-fixer/tool/ClangIncludeFixer.cpp
index 9f73f47..568cb2b 100644
--- a/clang-tools-extra/clang-include-fixer/tool/ClangIncludeFixer.cpp
+++ b/clang-tools-extra/clang-include-fixer/tool/ClangIncludeFixer.cpp
@@ -454,7 +454,7 @@ int includeFixerMain(int argc, const char **argv) {
// Set up a new source manager for applying the resulting replacements.
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diagnostics(new DiagnosticIDs, DiagOpts);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts);
TextDiagnosticPrinter DiagnosticPrinter(outs(), DiagOpts);
SourceManager SM(Diagnostics, tool.getFiles());
Diagnostics.setClient(&DiagnosticPrinter, false);
diff --git a/clang-tools-extra/clang-move/tool/ClangMove.cpp b/clang-tools-extra/clang-move/tool/ClangMove.cpp
index 750eb95..1be3cb1 100644
--- a/clang-tools-extra/clang-move/tool/ClangMove.cpp
+++ b/clang-tools-extra/clang-move/tool/ClangMove.cpp
@@ -178,9 +178,8 @@ int main(int argc, const char **argv) {
DiagnosticOptions DiagOpts;
clang::TextDiagnosticPrinter DiagnosticPrinter(errs(), DiagOpts);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), DiagOpts,
- &DiagnosticPrinter, false);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts,
+ &DiagnosticPrinter, false);
auto &FileMgr = Tool.getFiles();
SourceManager SM(Diagnostics, FileMgr);
Rewriter Rewrite(SM, LangOptions());
diff --git a/clang-tools-extra/clang-reorder-fields/tool/ClangReorderFields.cpp b/clang-tools-extra/clang-reorder-fields/tool/ClangReorderFields.cpp
index 0350252..fbfce07 100644
--- a/clang-tools-extra/clang-reorder-fields/tool/ClangReorderFields.cpp
+++ b/clang-tools-extra/clang-reorder-fields/tool/ClangReorderFields.cpp
@@ -74,9 +74,8 @@ int main(int argc, const char **argv) {
LangOptions DefaultLangOptions;
DiagnosticOptions DiagOpts;
TextDiagnosticPrinter DiagnosticPrinter(errs(), DiagOpts);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), DiagOpts,
- &DiagnosticPrinter, false);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts,
+ &DiagnosticPrinter, false);
auto &FileMgr = Tool.getFiles();
SourceManager Sources(Diagnostics, FileMgr);
diff --git a/clang-tools-extra/clang-tidy/ClangTidy.cpp b/clang-tools-extra/clang-tidy/ClangTidy.cpp
index e84be04..4ae2864 100644
--- a/clang-tools-extra/clang-tidy/ClangTidy.cpp
+++ b/clang-tools-extra/clang-tidy/ClangTidy.cpp
@@ -96,8 +96,7 @@ public:
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS)
: Files(FileSystemOptions(), std::move(BaseFS)),
DiagPrinter(new TextDiagnosticPrinter(llvm::outs(), DiagOpts)),
- Diags(IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs), DiagOpts,
- DiagPrinter),
+ Diags(DiagnosticIDs::create(), DiagOpts, DiagPrinter),
SourceMgr(Diags, Files), Context(Context), ApplyFixes(ApplyFixes) {
DiagOpts.ShowColors = Context.getOptions().UseColor.value_or(
llvm::sys::Process::StandardOutHasColors());
@@ -570,7 +569,7 @@ runClangTidy(clang::tidy::ClangTidyContext &Context,
ClangTidyDiagnosticConsumer DiagConsumer(Context, nullptr, true, ApplyAnyFix);
auto DiagOpts = std::make_unique<DiagnosticOptions>();
- DiagnosticsEngine DE(new DiagnosticIDs(), *DiagOpts, &DiagConsumer,
+ DiagnosticsEngine DE(DiagnosticIDs::create(), *DiagOpts, &DiagConsumer,
/*ShouldOwnClient=*/false);
Context.setDiagnosticsEngine(std::move(DiagOpts), &DE);
Tool.setDiagnosticConsumer(&DiagConsumer);
diff --git a/clang-tools-extra/clang-tidy/ExpandModularHeadersPPCallbacks.cpp b/clang-tools-extra/clang-tidy/ExpandModularHeadersPPCallbacks.cpp
index 2c17cd3..5e705f7 100644
--- a/clang-tools-extra/clang-tidy/ExpandModularHeadersPPCallbacks.cpp
+++ b/clang-tools-extra/clang-tidy/ExpandModularHeadersPPCallbacks.cpp
@@ -71,7 +71,7 @@ ExpandModularHeadersPPCallbacks::ExpandModularHeadersPPCallbacks(
InMemoryFs(new llvm::vfs::InMemoryFileSystem),
Sources(Compiler.getSourceManager()),
// Forward the new diagnostics to the original DiagnosticConsumer.
- Diags(new DiagnosticIDs, DiagOpts,
+ Diags(DiagnosticIDs::create(), DiagOpts,
new ForwardingDiagnosticConsumer(Compiler.getDiagnosticClient())),
LangOpts(Compiler.getLangOpts()), HSOpts(Compiler.getHeaderSearchOpts()) {
// Add a FileSystem containing the extra files needed in place of modular
diff --git a/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp b/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp
index ed1fd13..824ebdf 100644
--- a/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/BugproneTidyModule.cpp
@@ -38,6 +38,7 @@
#include "IncorrectRoundingsCheck.h"
#include "InfiniteLoopCheck.h"
#include "IntegerDivisionCheck.h"
+#include "InvalidEnumDefaultInitializationCheck.h"
#include "LambdaFunctionNameCheck.h"
#include "MacroParenthesesCheck.h"
#include "MacroRepeatedSideEffectsCheck.h"
@@ -165,6 +166,8 @@ public:
CheckFactories.registerCheck<InfiniteLoopCheck>("bugprone-infinite-loop");
CheckFactories.registerCheck<IntegerDivisionCheck>(
"bugprone-integer-division");
+ CheckFactories.registerCheck<InvalidEnumDefaultInitializationCheck>(
+ "bugprone-invalid-enum-default-initialization");
CheckFactories.registerCheck<LambdaFunctionNameCheck>(
"bugprone-lambda-function-name");
CheckFactories.registerCheck<MacroParenthesesCheck>(
diff --git a/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt b/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt
index d862794..59928e5 100644
--- a/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/bugprone/CMakeLists.txt
@@ -30,6 +30,7 @@ add_clang_library(clangTidyBugproneModule STATIC
InaccurateEraseCheck.cpp
IncorrectEnableIfCheck.cpp
IncorrectEnableSharedFromThisCheck.cpp
+ InvalidEnumDefaultInitializationCheck.cpp
UnintendedCharOstreamOutputCheck.cpp
ReturnConstRefFromParameterCheck.cpp
SuspiciousStringviewDataUsageCheck.cpp
diff --git a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp
new file mode 100644
index 0000000..33fcf45
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.cpp
@@ -0,0 +1,180 @@
+//===--- InvalidEnumDefaultInitializationCheck.cpp - clang-tidy -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "InvalidEnumDefaultInitializationCheck.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include <algorithm>
+
+using namespace clang::ast_matchers;
+
+namespace clang::tidy::bugprone {
+
+namespace {
+
+bool isCompleteAndHasNoZeroValue(const EnumDecl *D) {
+ const EnumDecl *Definition = D->getDefinition();
+ return Definition && Definition->isComplete() &&
+ !Definition->enumerators().empty() &&
+ std::none_of(Definition->enumerator_begin(),
+ Definition->enumerator_end(),
+ [](const EnumConstantDecl *Value) {
+ return Value->getInitVal().isZero();
+ });
+}
+
+AST_MATCHER(EnumDecl, isCompleteAndHasNoZeroValue) {
+ return isCompleteAndHasNoZeroValue(&Node);
+}
+
+// Find an initialization that initializes the value (if it has enum type) to
+// a default zero value.
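+// For example, 'E X{};' (an empty InitListExpr), 'E()' (a
+// CXXScalarValueInitExpr), and the implicit initialization of omitted
+// aggregate members (an ImplicitValueInitExpr) all match.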
+AST_MATCHER(Expr, isEmptyInit) {
+ if (isa<CXXScalarValueInitExpr, ImplicitValueInitExpr>(&Node))
+ return true;
+ if (const auto *Init = dyn_cast<InitListExpr>(&Node)) {
+ if (Init->getNumInits() == 0)
+ return true;
+ }
+ return false;
+}
+
+AST_MATCHER(InitListExpr, hasArrayFiller) { return Node.hasArrayFiller(); }
+
+// Check if any type has a "child" type that is an enum without a zero value.
+// The "child" type can be an array element type or a member type of a record
+// type (or a recursive combination of these). In this case, if the "root" type
+// is statically initialized, the enum component is initialized to zero.
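+// For example, in 'struct S { int I; enum E { A = 1 } Arr[2]; };' a
+// zero-initialized 'S' sets both elements of 'Arr' to 0, which is not a
+// valid value of 'E'.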
+class FindEnumMember : public TypeVisitor<FindEnumMember, bool> {
+public:
+ const EnumType *FoundEnum = nullptr;
+
+ bool VisitType(const Type *T) {
+ const Type *DesT = T->getUnqualifiedDesugaredType();
+ if (DesT != T)
+ return Visit(DesT);
+ return false;
+ }
+ bool VisitArrayType(const ArrayType *T) {
+ return Visit(T->getElementType().getTypePtr());
+ }
+ bool VisitConstantArrayType(const ConstantArrayType *T) {
+ return Visit(T->getElementType().getTypePtr());
+ }
+ bool VisitEnumType(const EnumType *T) {
+ if (isCompleteAndHasNoZeroValue(T->getDecl())) {
+ FoundEnum = T;
+ return true;
+ }
+ return false;
+ }
+ bool VisitRecordType(const RecordType *T) {
+ const RecordDecl *RD = T->getDecl();
+ if (RD->isUnion())
+ return false;
+ auto VisitField = [this](const FieldDecl *F) {
+ return Visit(F->getType().getTypePtr());
+ };
+ return llvm::any_of(RD->fields(), VisitField);
+ }
+};
+
+} // namespace
+
+InvalidEnumDefaultInitializationCheck::InvalidEnumDefaultInitializationCheck(
+ StringRef Name, ClangTidyContext *Context)
+ : ClangTidyCheck(Name, Context) {}
+
+void InvalidEnumDefaultInitializationCheck::registerMatchers(
+ MatchFinder *Finder) {
+ auto EnumWithoutZeroValue = enumType(
+ hasDeclaration(enumDecl(isCompleteAndHasNoZeroValue()).bind("enum")));
+ auto EnumOrArrayOfEnum = qualType(hasUnqualifiedDesugaredType(
+ anyOf(EnumWithoutZeroValue,
+ arrayType(hasElementType(qualType(
+ hasUnqualifiedDesugaredType(EnumWithoutZeroValue)))))));
+ Finder->addMatcher(
+ expr(isEmptyInit(), hasType(EnumOrArrayOfEnum)).bind("expr"), this);
+
+  // Array initialization can contain an "array filler" for the (syntactically)
+  // unspecified elements. This expression is not found by AST matchers and can
+  // have any type (the array's element type). This is an implicitly generated
+  // initialization, so if the type contains an enum without a zero enumerator
+  // anywhere, the zero initialization applies here. We search this array
+  // element type manually for the specific enum type when this matcher matches.
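+  // For example, in 'E Arr[4] = {E::A};' the last three elements are
+  // initialized to 0 by the array filler.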
+ Finder->addMatcher(initListExpr(hasArrayFiller()).bind("array_filler_expr"),
+ this);
+}
+
+void InvalidEnumDefaultInitializationCheck::check(
+ const MatchFinder::MatchResult &Result) {
+ const auto *InitExpr = Result.Nodes.getNodeAs<Expr>("expr");
+ const auto *Enum = Result.Nodes.getNodeAs<EnumDecl>("enum");
+ if (!InitExpr) {
+ const auto *InitList =
+ Result.Nodes.getNodeAs<InitListExpr>("array_filler_expr");
+    // An initialization of omitted array elements with an array filler was
+    // found. Check the type for an enum without a zero value.
+    // FIXME: Only one enum-typed value is found this way, not all of them.
+ FindEnumMember Finder;
+ if (!Finder.Visit(InitList->getArrayFiller()->getType().getTypePtr()))
+ return;
+ InitExpr = InitList;
+ Enum = Finder.FoundEnum->getDecl();
+ }
+
+ if (!InitExpr || !Enum)
+ return;
+
+ ASTContext &ACtx = Enum->getASTContext();
+ SourceLocation Loc = InitExpr->getExprLoc();
+ if (Loc.isInvalid()) {
+ if (isa<ImplicitValueInitExpr, InitListExpr>(InitExpr)) {
+ DynTypedNodeList Parents = ACtx.getParents(*InitExpr);
+ if (Parents.empty())
+ return;
+
+ if (const auto *Ctor = Parents[0].get<CXXConstructorDecl>()) {
+ // Try to find member initializer with the found expression and get the
+ // source location from it.
+ CXXCtorInitializer *const *CtorInit = std::find_if(
+ Ctor->init_begin(), Ctor->init_end(),
+ [InitExpr](const CXXCtorInitializer *Init) {
+ return Init->isMemberInitializer() && Init->getInit() == InitExpr;
+ });
+      if (CtorInit == Ctor->init_end())
+ return;
+ Loc = (*CtorInit)->getLParenLoc();
+ } else if (const auto *InitList = Parents[0].get<InitListExpr>()) {
+ // The expression may be implicitly generated for an initialization.
+ // Search for a parent initialization list with valid source location.
+ while (InitList->getExprLoc().isInvalid()) {
+ DynTypedNodeList Parents = ACtx.getParents(*InitList);
+ if (Parents.empty())
+ return;
+ InitList = Parents[0].get<InitListExpr>();
+ if (!InitList)
+ return;
+ }
+ Loc = InitList->getExprLoc();
+ }
+ }
+    // If we still have not found a source location, omit the warning.
+ // Ideally all such cases (if they exist) should be handled to make the
+ // check more precise.
+ if (Loc.isInvalid())
+ return;
+ }
+ diag(Loc, "enum value of type %0 initialized with invalid value of 0, "
+ "enum doesn't have a zero-value enumerator")
+ << Enum;
+ diag(Enum->getLocation(), "enum is defined here", DiagnosticIDs::Note);
+}
+
+} // namespace clang::tidy::bugprone
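
A minimal sketch of user code the new check is intended to flag (hypothetical
example, not part of the patch):

    enum class Color : int { Red = 1, Green = 2 }; // no zero-value enumerator

    Color C1{};                  // warning: C1 is initialized to 0
    Color C2 = Color();          // warning: C2 is initialized to 0
    Color Arr[2] = {Color::Red}; // warning: Arr[1] is zero-initialized by the
                                 // array filler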
diff --git a/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h
new file mode 100644
index 0000000..0746c4d
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/bugprone/InvalidEnumDefaultInitializationCheck.h
@@ -0,0 +1,31 @@
+//===--- InvalidEnumDefaultInitializationCheck.h - clang-tidy -*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_INVALIDENUMDEFAULTINITIALIZATIONCHECK_H
+#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_INVALIDENUMDEFAULTINITIALIZATIONCHECK_H
+
+#include "../ClangTidyCheck.h"
+
+namespace clang::tidy::bugprone {
+
+/// Detects default initialization (to 0) of variables with `enum` type where
+/// the enum has no enumerator with a value of 0.
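+///
+/// Example (hypothetical user code):
+/// \code
+///   enum class E { A = 1 };
+///   E X{}; // warning: 'X' is initialized to 0, which is not a valid 'E'
+/// \endcode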
+///
+/// For the user-facing documentation see:
+/// http://clang.llvm.org/extra/clang-tidy/checks/bugprone/invalid-enum-default-initialization.html
+class InvalidEnumDefaultInitializationCheck : public ClangTidyCheck {
+public:
+ InvalidEnumDefaultInitializationCheck(StringRef Name,
+ ClangTidyContext *Context);
+ void registerMatchers(ast_matchers::MatchFinder *Finder) override;
+ void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
+};
+
+} // namespace clang::tidy::bugprone
+
+#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_BUGPRONE_INVALIDENUMDEFAULTINITIALIZATIONCHECK_H
diff --git a/clang-tools-extra/clang-tidy/plugin/ClangTidyPlugin.cpp b/clang-tools-extra/clang-tidy/plugin/ClangTidyPlugin.cpp
index 651a63b..195418d 100644
--- a/clang-tools-extra/clang-tidy/plugin/ClangTidyPlugin.cpp
+++ b/clang-tools-extra/clang-tidy/plugin/ClangTidyPlugin.cpp
@@ -41,7 +41,7 @@ public:
new ClangTidyDiagnosticConsumer(*Context, &Compiler.getDiagnostics());
auto DiagOpts = std::make_unique<DiagnosticOptions>();
auto DiagEngine = std::make_unique<DiagnosticsEngine>(
- new DiagnosticIDs, *DiagOpts, DiagConsumer);
+ DiagnosticIDs::create(), *DiagOpts, DiagConsumer);
Context->setDiagnosticsEngine(std::move(DiagOpts), DiagEngine.get());
// Create the AST consumer.
diff --git a/clang-tools-extra/clangd/Preamble.cpp b/clang-tools-extra/clangd/Preamble.cpp
index 7b4d63f..8af9e46 100644
--- a/clang-tools-extra/clangd/Preamble.cpp
+++ b/clang-tools-extra/clangd/Preamble.cpp
@@ -659,7 +659,7 @@ buildPreamble(PathRef FileName, CompilerInvocation CI,
WallTimer PreambleTimer;
PreambleTimer.startTimer();
auto BuiltPreamble = PrecompiledPreamble::Build(
- CI, ContentsBuffer.get(), Bounds, *PreambleDiagsEngine,
+ CI, ContentsBuffer.get(), Bounds, PreambleDiagsEngine,
Stats ? TimedFS : StatCacheFS, std::make_shared<PCHContainerOperations>(),
StoreInMemory, /*StoragePath=*/"", CapturedInfo);
diff --git a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp
index 0b067e8..106de1b 100644
--- a/clang-tools-extra/clangd/SystemIncludeExtractor.cpp
+++ b/clang-tools-extra/clangd/SystemIncludeExtractor.cpp
@@ -254,7 +254,7 @@ bool isValidTarget(llvm::StringRef Triple) {
std::shared_ptr<TargetOptions> TargetOpts(new TargetOptions);
TargetOpts->Triple = Triple.str();
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs, DiagOpts,
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
new IgnoringDiagConsumer);
llvm::IntrusiveRefCntPtr<TargetInfo> Target =
TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
diff --git a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
index 5a5d815..61bd631 100644
--- a/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
+++ b/clang-tools-extra/clangd/unittests/CodeCompleteTests.cpp
@@ -3267,6 +3267,56 @@ TEST(SignatureHelpTest, VariadicType) {
}
}
+TEST(SignatureHelpTest, SkipExplicitObjectParameter) {
+ Annotations Code(R"cpp(
+ struct A {
+ void foo(this auto&& self, int arg);
+ void bar(this A self, int arg);
+ };
+ int main() {
+ A a {};
+ a.foo($c1^);
+ (&A::bar)($c2^);
+ (&A::foo)($c3^);
+ }
+ )cpp");
+
+ auto TU = TestTU::withCode(Code.code());
+ TU.ExtraArgs = {"-std=c++23"};
+
+ MockFS FS;
+ auto Inputs = TU.inputs(FS);
+
+ auto Preamble = TU.preamble();
+ ASSERT_TRUE(Preamble);
+
+ {
+ const auto Result = signatureHelp(testPath(TU.Filename), Code.point("c1"),
+ *Preamble, Inputs, MarkupKind::PlainText);
+
+ EXPECT_EQ(1U, Result.signatures.size());
+
+ EXPECT_THAT(Result.signatures[0], AllOf(sig("foo([[int arg]]) -> void")));
+ }
+ {
+ const auto Result = signatureHelp(testPath(TU.Filename), Code.point("c2"),
+ *Preamble, Inputs, MarkupKind::PlainText);
+
+ EXPECT_EQ(1U, Result.signatures.size());
+
+ EXPECT_THAT(Result.signatures[0], AllOf(sig("([[A]], [[int]]) -> void")));
+ }
+ {
+ // TODO: llvm/llvm-project/146649
+ const auto Result = signatureHelp(testPath(TU.Filename), Code.point("c3"),
+ *Preamble, Inputs, MarkupKind::PlainText);
+    // TODO: We expect 1 signature here, namely the commented-out one below.
+ EXPECT_EQ(0U, Result.signatures.size());
+ // EXPECT_THAT(Result.signatures[0], AllOf(sig("([[auto&&]], [[int]]) ->
+ // void")));
+ }
+}
+
TEST(CompletionTest, IncludedCompletionKinds) {
Annotations Test(R"cpp(#include "^)cpp");
auto TU = TestTU::withCode(Test.code());
@@ -4369,14 +4419,24 @@ TEST(CompletionTest, SkipExplicitObjectParameter) {
Annotations Code(R"cpp(
struct A {
void foo(this auto&& self, int arg);
+ void bar(this A self, int arg);
};
int main() {
A a {};
- a.^
+ a.$c1^;
+ (&A::fo$c2^;
+ (&A::ba$c3^;
}
)cpp");
+ // TODO: llvm/llvm-project/146649
+  // This is incorrect behavior. The correct result should be a variant of:
+ // c2: signature = (auto&& self, int arg)
+ // snippet = (${1: auto&& self}, ${2: int arg})
+ // c3: signature = (A self, int arg)
+ // snippet = (${1: A self}, ${2: int arg})
+
auto TU = TestTU::withCode(Code.code());
TU.ExtraArgs = {"-std=c++23"};
@@ -4387,12 +4447,31 @@ TEST(CompletionTest, SkipExplicitObjectParameter) {
MockFS FS;
auto Inputs = TU.inputs(FS);
- auto Result = codeComplete(testPath(TU.Filename), Code.point(),
- Preamble.get(), Inputs, Opts);
-
- EXPECT_THAT(Result.Completions,
- ElementsAre(AllOf(named("foo"), signature("(int arg)"),
- snippetSuffix("(${1:int arg})"))));
+ {
+ auto Result = codeComplete(testPath(TU.Filename), Code.point("c1"),
+ Preamble.get(), Inputs, Opts);
+
+ EXPECT_THAT(Result.Completions,
+ UnorderedElementsAre(AllOf(named("foo"), signature("(int arg)"),
+ snippetSuffix("(${1:int arg})")),
+ AllOf(named("bar"), signature("(int arg)"),
+ snippetSuffix("(${1:int arg})"))));
+ }
+ {
+ auto Result = codeComplete(testPath(TU.Filename), Code.point("c2"),
+ Preamble.get(), Inputs, Opts);
+ EXPECT_THAT(
+ Result.Completions,
+ ElementsAre(AllOf(named("foo"), signature("<class self:auto>(int arg)"),
+ snippetSuffix("<${1:class self:auto}>"))));
+ }
+ {
+ auto Result = codeComplete(testPath(TU.Filename), Code.point("c3"),
+ Preamble.get(), Inputs, Opts);
+ EXPECT_THAT(Result.Completions,
+ ElementsAre(AllOf(named("bar"), signature("(int arg)"),
+ snippetSuffix(""))));
+ }
}
} // namespace
} // namespace clangd
diff --git a/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp b/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp
index 75d0ff2..0e411b2 100644
--- a/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp
+++ b/clang-tools-extra/clangd/unittests/ConfigCompileTests.cpp
@@ -299,7 +299,7 @@ TEST_F(ConfigCompileTests, DiagnosticSuppression) {
"typecheck_bool_condition",
"unexpected_friend", "warn_alloca"));
clang::DiagnosticOptions DiagOpts;
- clang::DiagnosticsEngine DiagEngine(new DiagnosticIDs, DiagOpts,
+ clang::DiagnosticsEngine DiagEngine(DiagnosticIDs::create(), DiagOpts,
new clang::IgnoringDiagConsumer);
using Diag = clang::Diagnostic;
diff --git a/clang-tools-extra/clangd/unittests/tweaks/TweakTests.cpp b/clang-tools-extra/clangd/unittests/tweaks/TweakTests.cpp
index e39b702..b6607e9 100644
--- a/clang-tools-extra/clangd/unittests/tweaks/TweakTests.cpp
+++ b/clang-tools-extra/clangd/unittests/tweaks/TweakTests.cpp
@@ -45,7 +45,7 @@ TEST(FileEdits, AbsolutePath) {
MemFS->addFile(Path, 0, llvm::MemoryBuffer::getMemBuffer("", Path));
FileManager FM(FileSystemOptions(), MemFS);
DiagnosticOptions DiagOpts;
- DiagnosticsEngine DE(new DiagnosticIDs, DiagOpts);
+ DiagnosticsEngine DE(DiagnosticIDs::create(), DiagOpts);
SourceManager SM(DE, FM);
for (const auto *Path : RelPaths) {
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index 2de2818..e45f870 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -108,6 +108,12 @@ Improvements to clang-tidy
New checks
^^^^^^^^^^
+- New :doc:`bugprone-invalid-enum-default-initialization
+ <clang-tidy/checks/bugprone/invalid-enum-default-initialization>` check.
+
+  Detects default initialization (to 0) of variables with ``enum`` type where
+  the enum has no enumerator with a value of 0.
+
- New :doc:`llvm-mlir-op-builder
<clang-tidy/checks/llvm/use-new-mlir-op-builder>` check.
diff --git a/clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst b/clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst
new file mode 100644
index 0000000..a3bd2b6
--- /dev/null
+++ b/clang-tools-extra/docs/clang-tidy/checks/bugprone/invalid-enum-default-initialization.rst
@@ -0,0 +1,72 @@
+.. title:: clang-tidy - bugprone-invalid-enum-default-initialization
+
+bugprone-invalid-enum-default-initialization
+============================================
+
+Detects default initialization (to 0) of variables with ``enum`` type where
+the enum has no enumerator with a value of 0.
+
+In C++ a default initialization is performed if a variable is initialized with
+an empty initializer list or in other implicit ways where no value is specified
+explicitly. In such cases the value 0 is used for the initialization.
+This also applies to enumerations even if they have no enumerator with
+value 0. In this way a variable of ``enum`` type may initially contain an
+invalid value (if the program expects it to contain only the listed
+enumerator values).
+
+The check emits a warning only if an ``enum`` variable is default-initialized
+(as opposed to not initialized at all) and the ``enum`` does not have an
+enumerator with the value 0. The type can be a scoped or non-scoped ``enum``.
+Unions are not handled by the check (even if they contain a member of
+enumeration type).
+
+.. code-block:: c++
+
+ enum class Enum1: int {
+ A = 1,
+ B
+ };
+
+ enum class Enum0: int {
+ A = 0,
+ B
+ };
+
+ void f() {
+ Enum1 X1{}; // warn: 'X1' is initialized to 0
+ Enum1 X2 = Enum1(); // warn: 'X2' is initialized to 0
+ Enum1 X3; // no warning: 'X3' is not initialized
+ Enum0 X4{}; // no warning: type has an enumerator with value of 0
+ }
+
+ struct S1 {
+ Enum1 A;
+    S1() : A() {} // warn: 'A' is initialized to 0
+ };
+
+ struct S2 {
+ int A;
+ Enum1 B;
+ };
+
+ S2 VarS2{}; // warn: member 'B' is initialized to 0
+
+The check applies to initialization of arrays or structures with initializer
+lists in C code too. In these cases, elements that are not specified in the
+list (and have an enum type) are set to 0.
+
+.. code-block:: c
+
+ enum Enum1 {
+ Enum1_A = 1,
+ Enum1_B
+ };
+ struct Struct1 {
+ int a;
+ enum Enum1 b;
+ };
+
+ enum Enum1 Array1[2] = {Enum1_A}; // warn: omitted elements are initialized to 0
+ enum Enum1 Array2[2][2] = {{Enum1_A}, {Enum1_A}}; // warn: last element of both nested arrays is initialized to 0
+ enum Enum1 Array3[2][2] = {{Enum1_A, Enum1_A}}; // warn: elements of second array are initialized to 0
+
+ struct Struct1 S1 = {1}; // warn: element 'b' is initialized to 0
diff --git a/clang-tools-extra/docs/clang-tidy/checks/list.rst b/clang-tools-extra/docs/clang-tidy/checks/list.rst
index 20a43274f..b6444eb 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/list.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/list.rst
@@ -106,6 +106,7 @@ Clang-Tidy Checks
:doc:`bugprone-incorrect-roundings <bugprone/incorrect-roundings>`,
:doc:`bugprone-infinite-loop <bugprone/infinite-loop>`,
:doc:`bugprone-integer-division <bugprone/integer-division>`,
+ :doc:`bugprone-invalid-enum-default-initialization <bugprone/invalid-enum-default-initialization>`,
:doc:`bugprone-lambda-function-name <bugprone/lambda-function-name>`,
:doc:`bugprone-macro-parentheses <bugprone/macro-parentheses>`, "Yes"
:doc:`bugprone-macro-repeated-side-effects <bugprone/macro-repeated-side-effects>`,
diff --git a/clang-tools-extra/modularize/ModularizeUtilities.cpp b/clang-tools-extra/modularize/ModularizeUtilities.cpp
index 8a24f21..4dd84fe 100644
--- a/clang-tools-extra/modularize/ModularizeUtilities.cpp
+++ b/clang-tools-extra/modularize/ModularizeUtilities.cpp
@@ -47,7 +47,7 @@ ModularizeUtilities::ModularizeUtilities(std::vector<std::string> &InputPaths,
ProblemFilesPath(ProblemFilesListPath), HasModuleMap(false),
MissingHeaderCount(0),
// Init clang stuff needed for loading the module map and preprocessing.
- LangOpts(new LangOptions()), DiagIDs(new DiagnosticIDs()),
+ LangOpts(new LangOptions()), DiagIDs(DiagnosticIDs::create()),
DC(llvm::errs(), DiagnosticOpts),
Diagnostics(new DiagnosticsEngine(DiagIDs, DiagnosticOpts, &DC, false)),
TargetOpts(new ModuleMapTargetOptions()),
diff --git a/clang-tools-extra/test/clang-apply-replacements/basic.cpp b/clang-tools-extra/test/clang-apply-replacements/basic.cpp
index 4f19a96..2399307 100644
--- a/clang-tools-extra/test/clang-apply-replacements/basic.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/basic.cpp
@@ -1,17 +1,17 @@
-// RUN: mkdir -p %T/Inputs/basic
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/basic/basic.h > %T/Inputs/basic/basic.h
-// RUN: sed "s#\$(path)#%/T/Inputs/basic#" %S/Inputs/basic/file1.yaml > %T/Inputs/basic/file1.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/basic#" %S/Inputs/basic/file2.yaml > %T/Inputs/basic/file2.yaml
-// RUN: clang-apply-replacements %T/Inputs/basic
-// RUN: FileCheck -input-file=%T/Inputs/basic/basic.h %S/Inputs/basic/basic.h
+// RUN: mkdir -p %t.dir/Inputs/basic
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/basic/basic.h > %t.dir/Inputs/basic/basic.h
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/basic#" %S/Inputs/basic/file1.yaml > %t.dir/Inputs/basic/file1.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/basic#" %S/Inputs/basic/file2.yaml > %t.dir/Inputs/basic/file2.yaml
+// RUN: clang-apply-replacements %t.dir/Inputs/basic
+// RUN: FileCheck -input-file=%t.dir/Inputs/basic/basic.h %S/Inputs/basic/basic.h
//
// Check that the yaml files are *not* deleted after running clang-apply-replacements without remove-change-desc-files.
-// RUN: ls -1 %T/Inputs/basic | FileCheck %s --check-prefix=YAML
+// RUN: ls -1 %t.dir/Inputs/basic | FileCheck %s --check-prefix=YAML
//
// Check that the yaml files *are* deleted after running clang-apply-replacements with remove-change-desc-files.
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/basic/basic.h > %T/Inputs/basic/basic.h
-// RUN: clang-apply-replacements -remove-change-desc-files %T/Inputs/basic
-// RUN: ls -1 %T/Inputs/basic | FileCheck %s --check-prefix=NO_YAML
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/basic/basic.h > %t.dir/Inputs/basic/basic.h
+// RUN: clang-apply-replacements -remove-change-desc-files %t.dir/Inputs/basic
+// RUN: ls -1 %t.dir/Inputs/basic | FileCheck %s --check-prefix=NO_YAML
//
// YAML: {{^file.\.yaml$}}
// NO_YAML-NOT: {{^file.\.yaml$}}
diff --git a/clang-tools-extra/test/clang-apply-replacements/conflict.cpp b/clang-tools-extra/test/clang-apply-replacements/conflict.cpp
index c1f2342..7b0a8c2 100644
--- a/clang-tools-extra/test/clang-apply-replacements/conflict.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/conflict.cpp
@@ -1,17 +1,17 @@
-// RUN: mkdir -p %T/Inputs/conflict
-// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/file1.yaml > %T/Inputs/conflict/file1.yaml
-// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/file2.yaml > %T/Inputs/conflict/file2.yaml
-// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/file3.yaml > %T/Inputs/conflict/file3.yaml
-// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/expected.txt > %T/Inputs/conflict/expected.txt
-// RUN: not clang-apply-replacements %T/Inputs/conflict > %T/Inputs/conflict/output.txt 2>&1
-// RUN: diff -b %T/Inputs/conflict/output.txt %T/Inputs/conflict/expected.txt
+// RUN: mkdir -p %t.dir/Inputs/conflict
+// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/file1.yaml > %t.dir/Inputs/conflict/file1.yaml
+// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/file2.yaml > %t.dir/Inputs/conflict/file2.yaml
+// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/file3.yaml > %t.dir/Inputs/conflict/file3.yaml
+// RUN: sed "s#\$(path)#%/S/Inputs/conflict#" %S/Inputs/conflict/expected.txt > %t.dir/Inputs/conflict/expected.txt
+// RUN: not clang-apply-replacements %t.dir/Inputs/conflict > %t.dir/Inputs/conflict/output.txt 2>&1
+// RUN: diff -b %t.dir/Inputs/conflict/output.txt %t.dir/Inputs/conflict/expected.txt
//
// Check that the yaml files are *not* deleted after running clang-apply-replacements without remove-change-desc-files even when there is a failure.
-// RUN: ls -1 %T/Inputs/conflict | FileCheck %s --check-prefix=YAML
+// RUN: ls -1 %t.dir/Inputs/conflict | FileCheck %s --check-prefix=YAML
//
// Check that the yaml files *are* deleted after running clang-apply-replacements with remove-change-desc-files even when there is a failure.
-// RUN: not clang-apply-replacements %T/Inputs/conflict -remove-change-desc-files > %T/Inputs/conflict/output.txt 2>&1
-// RUN: ls -1 %T/Inputs/conflict | FileCheck %s --check-prefix=NO_YAML
+// RUN: not clang-apply-replacements %t.dir/Inputs/conflict -remove-change-desc-files > %t.dir/Inputs/conflict/output.txt 2>&1
+// RUN: ls -1 %t.dir/Inputs/conflict | FileCheck %s --check-prefix=NO_YAML
//
// YAML: {{^file.\.yaml$}}
// NO_YAML-NOT: {{^file.\.yaml$}}
diff --git a/clang-tools-extra/test/clang-apply-replacements/crlf.cpp b/clang-tools-extra/test/clang-apply-replacements/crlf.cpp
index 15ba5b5..f40429e 100644
--- a/clang-tools-extra/test/clang-apply-replacements/crlf.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/crlf.cpp
@@ -1,5 +1,5 @@
-// RUN: mkdir -p %T/Inputs/crlf
-// RUN: cat %S/Inputs/crlf/crlf.cpp > %T/Inputs/crlf/crlf.cpp
-// RUN: sed "s#\$(path)#%/T/Inputs/crlf#" %S/Inputs/crlf/file1.yaml > %T/Inputs/crlf/file1.yaml
-// RUN: clang-apply-replacements %T/Inputs/crlf
-// RUN: diff %T/Inputs/crlf/crlf.cpp %S/Inputs/crlf/crlf.cpp.expected
+// RUN: mkdir -p %t.dir/Inputs/crlf
+// RUN: cat %S/Inputs/crlf/crlf.cpp > %t.dir/Inputs/crlf/crlf.cpp
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/crlf#" %S/Inputs/crlf/file1.yaml > %t.dir/Inputs/crlf/file1.yaml
+// RUN: clang-apply-replacements %t.dir/Inputs/crlf
+// RUN: diff %t.dir/Inputs/crlf/crlf.cpp %S/Inputs/crlf/crlf.cpp.expected
diff --git a/clang-tools-extra/test/clang-apply-replacements/format-header.cpp b/clang-tools-extra/test/clang-apply-replacements/format-header.cpp
index 6a221c4..9d2680e 100644
--- a/clang-tools-extra/test/clang-apply-replacements/format-header.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/format-header.cpp
@@ -1,13 +1,13 @@
-// RUN: mkdir -p %T/Inputs/format_header_yes
-// RUN: mkdir -p %T/Inputs/format_header_no
+// RUN: mkdir -p %t.dir/Inputs/format_header_yes
+// RUN: mkdir -p %t.dir/Inputs/format_header_no
//
//
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format_header/yes.cpp > %T/Inputs/format_header_yes/yes.cpp
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format_header/no.cpp > %T/Inputs/format_header_no/no.cpp
-// RUN: sed "s#\$(path)#%/T/Inputs/format_header_yes#" %S/Inputs/format_header/yes.yaml > %T/Inputs/format_header_yes/yes.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/format_header_no#" %S/Inputs/format_header/no.yaml > %T/Inputs/format_header_no/no.yaml
-// RUN: clang-apply-replacements -format -style="{BasedOnStyle: llvm, SortIncludes: CaseSensitive}" %T/Inputs/format_header_yes
-// RUN: clang-apply-replacements %T/Inputs/format_header_no
-// RUN: FileCheck --strict-whitespace -input-file=%T/Inputs/format_header_yes/yes.cpp %S/Inputs/format_header/yes.cpp
-// RUN: FileCheck --strict-whitespace -input-file=%T/Inputs/format_header_no/no.cpp %S/Inputs/format_header/no.cpp
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format_header/yes.cpp > %t.dir/Inputs/format_header_yes/yes.cpp
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format_header/no.cpp > %t.dir/Inputs/format_header_no/no.cpp
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/format_header_yes#" %S/Inputs/format_header/yes.yaml > %t.dir/Inputs/format_header_yes/yes.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/format_header_no#" %S/Inputs/format_header/no.yaml > %t.dir/Inputs/format_header_no/no.yaml
+// RUN: clang-apply-replacements -format -style="{BasedOnStyle: llvm, SortIncludes: CaseSensitive}" %t.dir/Inputs/format_header_yes
+// RUN: clang-apply-replacements %t.dir/Inputs/format_header_no
+// RUN: FileCheck --strict-whitespace -input-file=%t.dir/Inputs/format_header_yes/yes.cpp %S/Inputs/format_header/yes.cpp
+// RUN: FileCheck --strict-whitespace -input-file=%t.dir/Inputs/format_header_no/no.cpp %S/Inputs/format_header/no.cpp
//
diff --git a/clang-tools-extra/test/clang-apply-replacements/format.cpp b/clang-tools-extra/test/clang-apply-replacements/format.cpp
index 7de320d..0f40ef62 100644
--- a/clang-tools-extra/test/clang-apply-replacements/format.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/format.cpp
@@ -1,15 +1,15 @@
-// RUN: mkdir -p %T/Inputs/format
+// RUN: mkdir -p %t.dir/Inputs/format
//
// yes.cpp requires formatting after replacements are applied. no.cpp does not.
// The presence of no.cpp ensures that files that don't need formatting still
// have their new state written to disk after applying replacements.
//
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format/yes.cpp > %T/Inputs/format/yes.cpp
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format/no.cpp > %T/Inputs/format/no.cpp
-// RUN: sed "s#\$(path)#%/T/Inputs/format#" %S/Inputs/format/yes.yaml > %T/Inputs/format/yes.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/format#" %S/Inputs/format/no.yaml > %T/Inputs/format/no.yaml
-// RUN: clang-apply-replacements -format %T/Inputs/format
-// RUN: FileCheck --strict-whitespace -input-file=%T/Inputs/format/yes.cpp %S/Inputs/format/yes.cpp
-// RUN: FileCheck --strict-whitespace -input-file=%T/Inputs/format/no.cpp %S/Inputs/format/no.cpp
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format/yes.cpp > %t.dir/Inputs/format/yes.cpp
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/format/no.cpp > %t.dir/Inputs/format/no.cpp
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/format#" %S/Inputs/format/yes.yaml > %t.dir/Inputs/format/yes.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/format#" %S/Inputs/format/no.yaml > %t.dir/Inputs/format/no.yaml
+// RUN: clang-apply-replacements -format %t.dir/Inputs/format
+// RUN: FileCheck --strict-whitespace -input-file=%t.dir/Inputs/format/yes.cpp %S/Inputs/format/yes.cpp
+// RUN: FileCheck --strict-whitespace -input-file=%t.dir/Inputs/format/no.cpp %S/Inputs/format/no.cpp
//
-// RUN not clang-apply-replacements -format=blah %T/Inputs/format
+// RUN: not clang-apply-replacements -format=blah %t.dir/Inputs/format
diff --git a/clang-tools-extra/test/clang-apply-replacements/identical-in-TU.cpp b/clang-tools-extra/test/clang-apply-replacements/identical-in-TU.cpp
index 024db11..df9c4fc 100644
--- a/clang-tools-extra/test/clang-apply-replacements/identical-in-TU.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/identical-in-TU.cpp
@@ -1,10 +1,10 @@
-// RUN: mkdir -p %T/Inputs/identical-in-TU
+// RUN: mkdir -p %t.dir/Inputs/identical-in-TU
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/identical-in-TU/identical-in-TU.cpp > %T/Inputs/identical-in-TU/identical-in-TU.cpp
-// RUN: sed "s#\$(path)#%/T/Inputs/identical-in-TU#" %S/Inputs/identical-in-TU/file1.yaml > %T/Inputs/identical-in-TU/file1.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/identical-in-TU#" %S/Inputs/identical-in-TU/file2.yaml > %T/Inputs/identical-in-TU/file2.yaml
-// RUN: clang-apply-replacements %T/Inputs/identical-in-TU
-// RUN: FileCheck -input-file=%T/Inputs/identical-in-TU/identical-in-TU.cpp %S/Inputs/identical-in-TU/identical-in-TU.cpp
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/identical-in-TU/identical-in-TU.cpp > %t.dir/Inputs/identical-in-TU/identical-in-TU.cpp
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/identical-in-TU#" %S/Inputs/identical-in-TU/file1.yaml > %t.dir/Inputs/identical-in-TU/file1.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/identical-in-TU#" %S/Inputs/identical-in-TU/file2.yaml > %t.dir/Inputs/identical-in-TU/file2.yaml
+// RUN: clang-apply-replacements %t.dir/Inputs/identical-in-TU
+// RUN: FileCheck -input-file=%t.dir/Inputs/identical-in-TU/identical-in-TU.cpp %S/Inputs/identical-in-TU/identical-in-TU.cpp
// Similar to identical test but each yaml file contains the same fix twice.
// This check ensures that only the duplicated replacements in a single yaml
diff --git a/clang-tools-extra/test/clang-apply-replacements/identical.cpp b/clang-tools-extra/test/clang-apply-replacements/identical.cpp
index ffbf2e3..8a2d1e5 100644
--- a/clang-tools-extra/test/clang-apply-replacements/identical.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/identical.cpp
@@ -1,6 +1,6 @@
-// RUN: mkdir -p %T/Inputs/identical
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/identical/identical.cpp > %T/Inputs/identical/identical.cpp
-// RUN: sed "s#\$(path)#%/T/Inputs/identical#" %S/Inputs/identical/file1.yaml > %T/Inputs/identical/file1.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/identical#" %S/Inputs/identical/file2.yaml > %T/Inputs/identical/file2.yaml
-// RUN: clang-apply-replacements %T/Inputs/identical
-// RUN: FileCheck -input-file=%T/Inputs/identical/identical.cpp %S/Inputs/identical/identical.cpp
+// RUN: mkdir -p %t.dir/Inputs/identical
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/identical/identical.cpp > %t.dir/Inputs/identical/identical.cpp
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/identical#" %S/Inputs/identical/file1.yaml > %t.dir/Inputs/identical/file1.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/identical#" %S/Inputs/identical/file2.yaml > %t.dir/Inputs/identical/file2.yaml
+// RUN: clang-apply-replacements %t.dir/Inputs/identical
+// RUN: FileCheck -input-file=%t.dir/Inputs/identical/identical.cpp %S/Inputs/identical/identical.cpp
diff --git a/clang-tools-extra/test/clang-apply-replacements/ignore-conflict.cpp b/clang-tools-extra/test/clang-apply-replacements/ignore-conflict.cpp
index 4e681dd..e310256 100644
--- a/clang-tools-extra/test/clang-apply-replacements/ignore-conflict.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/ignore-conflict.cpp
@@ -1,5 +1,5 @@
-// RUN: mkdir -p %T/Inputs/ignore-conflict
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/ignore-conflict/ignore-conflict.cpp > %T/Inputs/ignore-conflict/ignore-conflict.cpp
-// RUN: sed "s#\$(path)#%/T/Inputs/ignore-conflict#" %S/Inputs/ignore-conflict/file1.yaml > %T/Inputs/ignore-conflict/file1.yaml
-// RUN: clang-apply-replacements --ignore-insert-conflict %T/Inputs/ignore-conflict
-// RUN: FileCheck -input-file=%T/Inputs/ignore-conflict/ignore-conflict.cpp %S/Inputs/ignore-conflict/ignore-conflict.cpp
+// RUN: mkdir -p %t.dir/Inputs/ignore-conflict
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/ignore-conflict/ignore-conflict.cpp > %t.dir/Inputs/ignore-conflict/ignore-conflict.cpp
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/ignore-conflict#" %S/Inputs/ignore-conflict/file1.yaml > %t.dir/Inputs/ignore-conflict/file1.yaml
+// RUN: clang-apply-replacements --ignore-insert-conflict %t.dir/Inputs/ignore-conflict
+// RUN: FileCheck -input-file=%t.dir/Inputs/ignore-conflict/ignore-conflict.cpp %S/Inputs/ignore-conflict/ignore-conflict.cpp
diff --git a/clang-tools-extra/test/clang-apply-replacements/invalid-files.cpp b/clang-tools-extra/test/clang-apply-replacements/invalid-files.cpp
index b0eb9ef..09efd4c 100644
--- a/clang-tools-extra/test/clang-apply-replacements/invalid-files.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/invalid-files.cpp
@@ -1,6 +1,6 @@
-// RUN: mkdir -p %T/invalid-files
-// RUN: cp %S/Inputs/invalid-files/invalid-files.yaml %T/invalid-files/invalid-files.yaml
-// RUN: clang-apply-replacements %T/invalid-files
+// RUN: mkdir -p %t.dir/invalid-files
+// RUN: cp %S/Inputs/invalid-files/invalid-files.yaml %t.dir/invalid-files/invalid-files.yaml
+// RUN: clang-apply-replacements %t.dir/invalid-files
//
// Check that the yaml files are *not* deleted after running clang-apply-replacements without remove-change-desc-files.
-// RUN: ls %T/invalid-files/invalid-files.yaml
+// RUN: ls %t.dir/invalid-files/invalid-files.yaml
diff --git a/clang-tools-extra/test/clang-apply-replacements/order-dependent.cpp b/clang-tools-extra/test/clang-apply-replacements/order-dependent.cpp
index 769f4f7..32a3bd1 100644
--- a/clang-tools-extra/test/clang-apply-replacements/order-dependent.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/order-dependent.cpp
@@ -1,7 +1,7 @@
-// RUN: mkdir -p %T/Inputs/order-dependent
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/order-dependent/order-dependent.cpp > %T/Inputs/order-dependent/order-dependent.cpp
-// RUN: sed "s#\$(path)#%/T/Inputs/order-dependent#" %S/Inputs/order-dependent/file1.yaml > %T/Inputs/order-dependent/file1.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/order-dependent#" %S/Inputs/order-dependent/file2.yaml > %T/Inputs/order-dependent/file2.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/order-dependent#" %S/Inputs/order-dependent/expected.txt > %T/Inputs/order-dependent/expected.txt
-// RUN: not clang-apply-replacements %T/Inputs/order-dependent > %T/Inputs/order-dependent/output.txt 2>&1
-// RUN: diff -b %T/Inputs/order-dependent/output.txt %T/Inputs/order-dependent/expected.txt
+// RUN: mkdir -p %t.dir/Inputs/order-dependent
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/order-dependent/order-dependent.cpp > %t.dir/Inputs/order-dependent/order-dependent.cpp
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/order-dependent#" %S/Inputs/order-dependent/file1.yaml > %t.dir/Inputs/order-dependent/file1.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/order-dependent#" %S/Inputs/order-dependent/file2.yaml > %t.dir/Inputs/order-dependent/file2.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/order-dependent#" %S/Inputs/order-dependent/expected.txt > %t.dir/Inputs/order-dependent/expected.txt
+// RUN: not clang-apply-replacements %t.dir/Inputs/order-dependent > %t.dir/Inputs/order-dependent/output.txt 2>&1
+// RUN: diff -b %t.dir/Inputs/order-dependent/output.txt %t.dir/Inputs/order-dependent/expected.txt
diff --git a/clang-tools-extra/test/clang-apply-replacements/relative-paths.cpp b/clang-tools-extra/test/clang-apply-replacements/relative-paths.cpp
index 92cde84..36e3e89 100644
--- a/clang-tools-extra/test/clang-apply-replacements/relative-paths.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/relative-paths.cpp
@@ -1,7 +1,7 @@
-// RUN: mkdir -p %T/Inputs/relative-paths
-// RUN: mkdir -p %T/Inputs/relative-paths/subdir
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/relative-paths/basic.h > %T/Inputs/relative-paths/basic.h
-// RUN: sed "s#\$(path)#%/T/Inputs/relative-paths#" %S/Inputs/relative-paths/file1.yaml > %T/Inputs/relative-paths/file1.yaml
-// RUN: sed "s#\$(path)#%/T/Inputs/relative-paths#" %S/Inputs/relative-paths/file2.yaml > %T/Inputs/relative-paths/file2.yaml
-// RUN: clang-apply-replacements %T/Inputs/relative-paths
-// RUN: FileCheck -input-file=%T/Inputs/relative-paths/basic.h %S/Inputs/relative-paths/basic.h
+// RUN: mkdir -p %t.dir/Inputs/relative-paths
+// RUN: mkdir -p %t.dir/Inputs/relative-paths/subdir
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/relative-paths/basic.h > %t.dir/Inputs/relative-paths/basic.h
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/relative-paths#" %S/Inputs/relative-paths/file1.yaml > %t.dir/Inputs/relative-paths/file1.yaml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/relative-paths#" %S/Inputs/relative-paths/file2.yaml > %t.dir/Inputs/relative-paths/file2.yaml
+// RUN: clang-apply-replacements %t.dir/Inputs/relative-paths
+// RUN: FileCheck -input-file=%t.dir/Inputs/relative-paths/basic.h %S/Inputs/relative-paths/basic.h
diff --git a/clang-tools-extra/test/clang-apply-replacements/yml-basic.cpp b/clang-tools-extra/test/clang-apply-replacements/yml-basic.cpp
index e6ee919..e076ff7 100644
--- a/clang-tools-extra/test/clang-apply-replacements/yml-basic.cpp
+++ b/clang-tools-extra/test/clang-apply-replacements/yml-basic.cpp
@@ -1,17 +1,17 @@
-// RUN: mkdir -p %T/Inputs/yml-basic
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/yml-basic/basic.h > %T/Inputs/yml-basic/basic.h
-// RUN: sed "s#\$(path)#%/T/Inputs/yml-basic#" %S/Inputs/yml-basic/file1.yml > %T/Inputs/yml-basic/file1.yml
-// RUN: sed "s#\$(path)#%/T/Inputs/yml-basic#" %S/Inputs/yml-basic/file2.yml > %T/Inputs/yml-basic/file2.yml
-// RUN: clang-apply-replacements %T/Inputs/yml-basic
-// RUN: FileCheck -input-file=%T/Inputs/yml-basic/basic.h %S/Inputs/yml-basic/basic.h
+// RUN: mkdir -p %t.dir/Inputs/yml-basic
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/yml-basic/basic.h > %t.dir/Inputs/yml-basic/basic.h
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/yml-basic#" %S/Inputs/yml-basic/file1.yml > %t.dir/Inputs/yml-basic/file1.yml
+// RUN: sed "s#\$(path)#%/t.dir/Inputs/yml-basic#" %S/Inputs/yml-basic/file2.yml > %t.dir/Inputs/yml-basic/file2.yml
+// RUN: clang-apply-replacements %t.dir/Inputs/yml-basic
+// RUN: FileCheck -input-file=%t.dir/Inputs/yml-basic/basic.h %S/Inputs/yml-basic/basic.h
//
// Check that the yml files are *not* deleted after running clang-apply-replacements without remove-change-desc-files.
-// RUN: ls -1 %T/Inputs/yml-basic | FileCheck %s --check-prefix=YML
+// RUN: ls -1 %t.dir/Inputs/yml-basic | FileCheck %s --check-prefix=YML
//
// Check that the yml files *are* deleted after running clang-apply-replacements with remove-change-desc-files.
-// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/yml-basic/basic.h > %T/Inputs/yml-basic/basic.h
-// RUN: clang-apply-replacements -remove-change-desc-files %T/Inputs/yml-basic
-// RUN: ls -1 %T/Inputs/yml-basic | FileCheck %s --check-prefix=NO_YML
+// RUN: grep -Ev "// *[A-Z-]+:" %S/Inputs/yml-basic/basic.h > %t.dir/Inputs/yml-basic/basic.h
+// RUN: clang-apply-replacements -remove-change-desc-files %t.dir/Inputs/yml-basic
+// RUN: ls -1 %t.dir/Inputs/yml-basic | FileCheck %s --check-prefix=NO_YML
//
// YML: {{^file.\.yml$}}
// NO_YML-NOT: {{^file.\.yml$}}
diff --git a/clang-tools-extra/test/clang-change-namespace/allow-list.cpp b/clang-tools-extra/test/clang-change-namespace/allow-list.cpp
index 7a941dcb..3b0d5b9 100644
--- a/clang-tools-extra/test/clang-change-namespace/allow-list.cpp
+++ b/clang-tools-extra/test/clang-change-namespace/allow-list.cpp
@@ -1,5 +1,5 @@
-// RUN: echo "^std::.*$" > %T/allow-list.txt
-// RUN: clang-change-namespace -old_namespace "na::nb" -new_namespace "x::y" --file_pattern ".*" --allowed_file %T/allow-list.txt %s -- | sed 's,// CHECK.*,,' | FileCheck %s
+// RUN: echo "^std::.*$" > %t.allow-list.txt
+// RUN: clang-change-namespace -old_namespace "na::nb" -new_namespace "x::y" --file_pattern ".*" --allowed_file %t.allow-list.txt %s -- | sed 's,// CHECK.*,,' | FileCheck %s
#include "Inputs/fake-std.h"
diff --git a/clang-tools-extra/test/clang-change-namespace/macro.cpp b/clang-tools-extra/test/clang-change-namespace/macro.cpp
index 40c4caf..f0b134d 100644
--- a/clang-tools-extra/test/clang-change-namespace/macro.cpp
+++ b/clang-tools-extra/test/clang-change-namespace/macro.cpp
@@ -1,15 +1,16 @@
-// RUN: cp %S/macro.cpp %T/macro.cpp
-// RUN: echo "#define USING using na::nc::X" > %T/macro.h
+// RUN: mkdir -p %t.dir
+// RUN: cp %S/macro.cpp %t.dir/macro.cpp
+// RUN: echo "#define USING using na::nc::X" > %t.dir/macro.h
//
-// RUN: clang-change-namespace -old_namespace "na::nb" -new_namespace "x::y" --file_pattern "macro.cpp$" --i %T/macro.cpp --
-// RUN: FileCheck -input-file=%T/macro.cpp -check-prefix=CHECK-CC %s
-// RUN: FileCheck -input-file=%T/macro.h -check-prefix=CHECK-HEADER %s
+// RUN: clang-change-namespace -old_namespace "na::nb" -new_namespace "x::y" --file_pattern "macro.cpp$" --i %t.dir/macro.cpp --
+// RUN: FileCheck -input-file=%t.dir/macro.cpp -check-prefix=CHECK-CC %s
+// RUN: FileCheck -input-file=%t.dir/macro.h -check-prefix=CHECK-HEADER %s
//
-// RUN: cp %S/macro.cpp %T/macro.cpp
-// RUN: echo "#define USING using na::nc::X" > %T/macro.h
-// RUN: clang-change-namespace -old_namespace "na::nb" -new_namespace "x::y" --file_pattern ".*" --i %T/macro.cpp --
-// RUN: FileCheck -input-file=%T/macro.cpp -check-prefix=CHECK-CC %s
-// RUN: FileCheck -input-file=%T/macro.h -check-prefix=CHECK-CHANGED-HEADER %s
+// RUN: cp %S/macro.cpp %t.dir/macro.cpp
+// RUN: echo "#define USING using na::nc::X" > %t.dir/macro.h
+// RUN: clang-change-namespace -old_namespace "na::nb" -new_namespace "x::y" --file_pattern ".*" --i %t.dir/macro.cpp --
+// RUN: FileCheck -input-file=%t.dir/macro.cpp -check-prefix=CHECK-CC %s
+// RUN: FileCheck -input-file=%t.dir/macro.h -check-prefix=CHECK-CHANGED-HEADER %s
#include "macro.h"
namespace na { namespace nc { class X{}; } }
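One detail this hunk makes visible: %T generally already existed when a test ran (it is the test's output directory), whereas %t.dir must be created by the test itself, which is why the migration adds an explicit mkdir before the first cp. Any test that copies into or cd's into %t.dir needs the same guard, sketched here with a placeholder file name:

// RUN: mkdir -p %t.dir
// RUN: cp %S/example.cpp %t.dir/example.cpp
// (example.cpp is a placeholder, not a file in this patch)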
diff --git a/clang-tools-extra/test/clang-include-fixer/include_path.cpp b/clang-tools-extra/test/clang-include-fixer/include_path.cpp
index 9185b7a..a6f4a45 100644
--- a/clang-tools-extra/test/clang-include-fixer/include_path.cpp
+++ b/clang-tools-extra/test/clang-include-fixer/include_path.cpp
@@ -1,18 +1,18 @@
-// RUN: mkdir -p %T/clang-include-fixer/include
-// RUN: mkdir -p %T/clang-include-fixer/symbols
-// RUN: mkdir -p %T/clang-include-fixer/build
-// RUN: mkdir -p %T/clang-include-fixer/src
-// RUN: sed 's|test_dir|%/T/clang-include-fixer|g' %S/Inputs/database_template.json > %T/clang-include-fixer/build/compile_commands.json
-// RUN: echo -e '#include "bar.h"\nb::a::bar f;' > %T/clang-include-fixer/src/bar.cpp
-// RUN: echo 'namespace b { namespace a { class bar {}; } }' > %T/clang-include-fixer/include/bar.h
-// RUN: cd %T/clang-include-fixer/build
-// RUN: find-all-symbols -output-dir=%T/clang-include-fixer/symbols -p=. %T/clang-include-fixer/src/bar.cpp
-// RUN: find-all-symbols -merge-dir=%T/clang-include-fixer/symbols %T/clang-include-fixer/build/find_all_symbols.yaml
-// RUN: FileCheck -input-file=%T/clang-include-fixer/build/find_all_symbols.yaml -check-prefix=CHECK-YAML %s
+// RUN: mkdir -p %t.dir/clang-include-fixer/include
+// RUN: mkdir -p %t.dir/clang-include-fixer/symbols
+// RUN: mkdir -p %t.dir/clang-include-fixer/build
+// RUN: mkdir -p %t.dir/clang-include-fixer/src
+// RUN: sed 's|test_dir|%/t.dir/clang-include-fixer|g' %S/Inputs/database_template.json > %t.dir/clang-include-fixer/build/compile_commands.json
+// RUN: echo -e '#include "bar.h"\nb::a::bar f;' > %t.dir/clang-include-fixer/src/bar.cpp
+// RUN: echo 'namespace b { namespace a { class bar {}; } }' > %t.dir/clang-include-fixer/include/bar.h
+// RUN: cd %t.dir/clang-include-fixer/build
+// RUN: find-all-symbols -output-dir=%t.dir/clang-include-fixer/symbols -p=. %t.dir/clang-include-fixer/src/bar.cpp
+// RUN: find-all-symbols -merge-dir=%t.dir/clang-include-fixer/symbols %t.dir/clang-include-fixer/build/find_all_symbols.yaml
+// RUN: FileCheck -input-file=%t.dir/clang-include-fixer/build/find_all_symbols.yaml -check-prefix=CHECK-YAML %s
//
-// RUN: echo 'b::a::bar f;' > %T/clang-include-fixer/src/bar.cpp
-// RUN: clang-include-fixer -db=yaml -input=%T/clang-include-fixer/build/find_all_symbols.yaml -minimize-paths=true -p=. %T/clang-include-fixer/src/bar.cpp
-// RUN: FileCheck -input-file=%T/clang-include-fixer/src/bar.cpp %s
+// RUN: echo 'b::a::bar f;' > %t.dir/clang-include-fixer/src/bar.cpp
+// RUN: clang-include-fixer -db=yaml -input=%t.dir/clang-include-fixer/build/find_all_symbols.yaml -minimize-paths=true -p=. %t.dir/clang-include-fixer/src/bar.cpp
+// RUN: FileCheck -input-file=%t.dir/clang-include-fixer/src/bar.cpp %s
// CHECK-YAML: ..{{[/\\]}}include{{[/\\]}}bar.h
// CHECK: #include "bar.h"
diff --git a/clang-tools-extra/test/clang-include-fixer/multiple_fixes.cpp b/clang-tools-extra/test/clang-include-fixer/multiple_fixes.cpp
index 791417a..6c82e2a 100644
--- a/clang-tools-extra/test/clang-include-fixer/multiple_fixes.cpp
+++ b/clang-tools-extra/test/clang-include-fixer/multiple_fixes.cpp
@@ -1,11 +1,11 @@
// REQUIRES: shell
// RUN: sed -e 's#//.*$##' %s > %t.cpp
-// RUN: mkdir -p %T/clang-include-fixer/multiple-fixes
-// RUN: echo 'foo f;' > %T/clang-include-fixer/multiple-fixes/foo.cpp
-// RUN: echo 'bar b;' > %T/clang-include-fixer/multiple-fixes/bar.cpp
-// RUN: clang-include-fixer -db=fixed -input='foo= "foo.h";bar= "bar.h"' %T/clang-include-fixer/multiple-fixes/*.cpp --
-// RUN: FileCheck -input-file=%T/clang-include-fixer/multiple-fixes/bar.cpp %s -check-prefix=CHECK-BAR
-// RUN: FileCheck -input-file=%T/clang-include-fixer/multiple-fixes/foo.cpp %s -check-prefix=CHECK-FOO
+// RUN: mkdir -p %t.dir/clang-include-fixer/multiple-fixes
+// RUN: echo 'foo f;' > %t.dir/clang-include-fixer/multiple-fixes/foo.cpp
+// RUN: echo 'bar b;' > %t.dir/clang-include-fixer/multiple-fixes/bar.cpp
+// RUN: clang-include-fixer -db=fixed -input='foo= "foo.h";bar= "bar.h"' %t.dir/clang-include-fixer/multiple-fixes/*.cpp --
+// RUN: FileCheck -input-file=%t.dir/clang-include-fixer/multiple-fixes/bar.cpp %s -check-prefix=CHECK-BAR
+// RUN: FileCheck -input-file=%t.dir/clang-include-fixer/multiple-fixes/foo.cpp %s -check-prefix=CHECK-FOO
//
// CHECK-FOO: #include "foo.h"
// CHECK-FOO: foo f;
diff --git a/clang-tools-extra/test/clang-include-fixer/yamldb_autodetect.cpp b/clang-tools-extra/test/clang-include-fixer/yamldb_autodetect.cpp
index 1997390..1978e5d 100644
--- a/clang-tools-extra/test/clang-include-fixer/yamldb_autodetect.cpp
+++ b/clang-tools-extra/test/clang-include-fixer/yamldb_autodetect.cpp
@@ -1,6 +1,6 @@
-// RUN: mkdir -p %T/foo/bar
-// RUN: cp %p/Inputs/fake_yaml_db.yaml %T/find_all_symbols_db.yaml
-// RUN: cd %T/foo
+// RUN: mkdir -p %t.dir/foo/bar
+// RUN: cp %p/Inputs/fake_yaml_db.yaml %t.dir/find_all_symbols_db.yaml
+// RUN: cd %t.dir/foo
// RUN: sed -e 's#//.*$##' %s > bar/test.cpp
// RUN: clang-include-fixer -db=yaml bar/test.cpp --
// RUN: FileCheck %s -input-file=bar/test.cpp
diff --git a/clang-tools-extra/test/clang-move/move-class.cpp b/clang-tools-extra/test/clang-move/move-class.cpp
index a30cb4d..5fb0258 100644
--- a/clang-tools-extra/test/clang-move/move-class.cpp
+++ b/clang-tools-extra/test/clang-move/move-class.cpp
@@ -1,25 +1,25 @@
-// RUN: mkdir -p %T/clang-move/build
-// RUN: mkdir -p %T/clang-move/include
-// RUN: mkdir -p %T/clang-move/src
-// RUN: sed 's|$test_dir|%/T/clang-move|g' %S/Inputs/database_template.json > %T/clang-move/compile_commands.json
-// RUN: cp %S/Inputs/test.h %T/clang-move/include
-// RUN: cp %S/Inputs/test.cpp %T/clang-move/src
-// RUN: touch %T/clang-move/include/test2.h
-// RUN: cd %T/clang-move/build
-// RUN: clang-move -names="a::Foo" -new_cc=%T/clang-move/new_test.cpp -new_header=%T/clang-move/new_test.h -old_cc=../src/test.cpp -old_header=../include/test.h %T/clang-move/src/test.cpp
-// RUN: FileCheck -input-file=%T/clang-move/new_test.cpp -check-prefix=CHECK-NEW-TEST-CPP %s
-// RUN: FileCheck -input-file=%T/clang-move/new_test.h -check-prefix=CHECK-NEW-TEST-H %s
-// RUN: FileCheck -input-file=%T/clang-move/src/test.cpp -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
-// RUN: FileCheck -input-file=%T/clang-move/include/test.h -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
+// RUN: mkdir -p %t.dir/clang-move/build
+// RUN: mkdir -p %t.dir/clang-move/include
+// RUN: mkdir -p %t.dir/clang-move/src
+// RUN: sed 's|$test_dir|%/t.dir/clang-move|g' %S/Inputs/database_template.json > %t.dir/clang-move/compile_commands.json
+// RUN: cp %S/Inputs/test.h %t.dir/clang-move/include
+// RUN: cp %S/Inputs/test.cpp %t.dir/clang-move/src
+// RUN: touch %t.dir/clang-move/include/test2.h
+// RUN: cd %t.dir/clang-move/build
+// RUN: clang-move -names="a::Foo" -new_cc=%t.dir/clang-move/new_test.cpp -new_header=%t.dir/clang-move/new_test.h -old_cc=../src/test.cpp -old_header=../include/test.h %t.dir/clang-move/src/test.cpp
+// RUN: FileCheck -input-file=%t.dir/clang-move/new_test.cpp -check-prefix=CHECK-NEW-TEST-CPP %s
+// RUN: FileCheck -input-file=%t.dir/clang-move/new_test.h -check-prefix=CHECK-NEW-TEST-H %s
+// RUN: FileCheck -input-file=%t.dir/clang-move/src/test.cpp -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
+// RUN: FileCheck -input-file=%t.dir/clang-move/include/test.h -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
//
-// RUN: cp %S/Inputs/test.h %T/clang-move/include
-// RUN: cp %S/Inputs/test.cpp %T/clang-move/src
-// RUN: cd %T/clang-move/build
-// RUN: clang-move -names="a::Foo" -new_cc=%T/clang-move/new_test.cpp -new_header=%T/clang-move/new_test.h -old_cc=%T/clang-move/src/test.cpp -old_header=%T/clang-move/include/test.h %T/clang-move/src/test.cpp
-// RUN: FileCheck -input-file=%T/clang-move/new_test.cpp -check-prefix=CHECK-NEW-TEST-CPP %s
-// RUN: FileCheck -input-file=%T/clang-move/new_test.h -check-prefix=CHECK-NEW-TEST-H %s
-// RUN: FileCheck -input-file=%T/clang-move/src/test.cpp -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
-// RUN: FileCheck -input-file=%T/clang-move/include/test.h -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
+// RUN: cp %S/Inputs/test.h %t.dir/clang-move/include
+// RUN: cp %S/Inputs/test.cpp %t.dir/clang-move/src
+// RUN: cd %t.dir/clang-move/build
+// RUN: clang-move -names="a::Foo" -new_cc=%t.dir/clang-move/new_test.cpp -new_header=%t.dir/clang-move/new_test.h -old_cc=%t.dir/clang-move/src/test.cpp -old_header=%t.dir/clang-move/include/test.h %t.dir/clang-move/src/test.cpp
+// RUN: FileCheck -input-file=%t.dir/clang-move/new_test.cpp -check-prefix=CHECK-NEW-TEST-CPP %s
+// RUN: FileCheck -input-file=%t.dir/clang-move/new_test.h -check-prefix=CHECK-NEW-TEST-H %s
+// RUN: FileCheck -input-file=%t.dir/clang-move/src/test.cpp -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
+// RUN: FileCheck -input-file=%t.dir/clang-move/include/test.h -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
//
//
// CHECK-NEW-TEST-H: #ifndef TEST_H // comment 1
diff --git a/clang-tools-extra/test/clang-move/move-enum-decl.cpp b/clang-tools-extra/test/clang-move/move-enum-decl.cpp
index 42f6f99..f8fb5f6 100644
--- a/clang-tools-extra/test/clang-move/move-enum-decl.cpp
+++ b/clang-tools-extra/test/clang-move/move-enum-decl.cpp
@@ -1,14 +1,14 @@
-// RUN: mkdir -p %T/move-enum
-// RUN: cp %S/Inputs/enum.h %T/move-enum/enum.h
-// RUN: echo '#include "enum.h"' > %T/move-enum/enum.cpp
-// RUN: cd %T/move-enum
+// RUN: mkdir -p %t.dir/move-enum
+// RUN: cp %S/Inputs/enum.h %t.dir/move-enum/enum.h
+// RUN: echo '#include "enum.h"' > %t.dir/move-enum/enum.cpp
+// RUN: cd %t.dir/move-enum
//
// -----------------------------------------------------------------------------
// Test moving enum declarations.
// -----------------------------------------------------------------------------
-// RUN: clang-move -names="a::E1" -new_cc=%T/move-enum/new_test.cpp -new_header=%T/move-enum/new_test.h -old_cc=%T/move-enum/enum.cpp -old_header=%T/move-enum/enum.h %T/move-enum/enum.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-enum/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
-// RUN: FileCheck -input-file=%T/move-enum/enum.h -check-prefix=CHECK-OLD-TEST-H-CASE1 %s
+// RUN: clang-move -names="a::E1" -new_cc=%t.dir/move-enum/new_test.cpp -new_header=%t.dir/move-enum/new_test.h -old_cc=%t.dir/move-enum/enum.cpp -old_header=%t.dir/move-enum/enum.h %t.dir/move-enum/enum.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-enum/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
+// RUN: FileCheck -input-file=%t.dir/move-enum/enum.h -check-prefix=CHECK-OLD-TEST-H-CASE1 %s
//
// CHECK-NEW-TEST-H-CASE1: namespace a {
// CHECK-NEW-TEST-H-CASE1-NEXT: enum E1 { Green, Red };
@@ -20,11 +20,11 @@
// -----------------------------------------------------------------------------
// Test moving scoped enum declarations.
// -----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/enum.h %T/move-enum/enum.h
-// RUN: echo '#include "enum.h"' > %T/move-enum/enum.cpp
-// RUN: clang-move -names="a::E2" -new_cc=%T/move-enum/new_test.cpp -new_header=%T/move-enum/new_test.h -old_cc=%T/move-enum/enum.cpp -old_header=%T/move-enum/enum.h %T/move-enum/enum.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-enum/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-enum/enum.h -check-prefix=CHECK-OLD-TEST-H-CASE2 %s
+// RUN: cp %S/Inputs/enum.h %t.dir/move-enum/enum.h
+// RUN: echo '#include "enum.h"' > %t.dir/move-enum/enum.cpp
+// RUN: clang-move -names="a::E2" -new_cc=%t.dir/move-enum/new_test.cpp -new_header=%t.dir/move-enum/new_test.h -old_cc=%t.dir/move-enum/enum.cpp -old_header=%t.dir/move-enum/enum.h %t.dir/move-enum/enum.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-enum/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-enum/enum.h -check-prefix=CHECK-OLD-TEST-H-CASE2 %s
// CHECK-NEW-TEST-H-CASE2: namespace a {
// CHECK-NEW-TEST-H-CASE2-NEXT: enum class E2 { Yellow };
@@ -36,9 +36,9 @@
// -----------------------------------------------------------------------------
// Test not moving enum declarations nested inside a class.
// -----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/enum.h %T/move-enum/enum.h
-// RUN: echo '#include "enum.h"' > %T/move-enum/enum.cpp
-// RUN: clang-move -names="a::C::E3" -new_cc=%T/move-enum/new_test.cpp -new_header=%T/move-enum/new_test.h -old_cc=%T/move-enum/enum.cpp -old_header=%T/move-enum/enum.h %T/move-enum/enum.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-enum/new_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
+// RUN: cp %S/Inputs/enum.h %t.dir/move-enum/enum.h
+// RUN: echo '#include "enum.h"' > %t.dir/move-enum/enum.cpp
+// RUN: clang-move -names="a::C::E3" -new_cc=%t.dir/move-enum/new_test.cpp -new_header=%t.dir/move-enum/new_test.h -old_cc=%t.dir/move-enum/enum.cpp -old_header=%t.dir/move-enum/enum.h %t.dir/move-enum/enum.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-enum/new_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
// CHECK-EMPTY: {{^}}{{$}}
diff --git a/clang-tools-extra/test/clang-move/move-function.cpp b/clang-tools-extra/test/clang-move/move-function.cpp
index 0324b80..a52d55c 100644
--- a/clang-tools-extra/test/clang-move/move-function.cpp
+++ b/clang-tools-extra/test/clang-move/move-function.cpp
@@ -1,9 +1,9 @@
-// RUN: mkdir -p %T/move-function
-// RUN: cat %S/Inputs/function_test.h > %T/move-function/function_test.h
-// RUN: cat %S/Inputs/function_test.cpp > %T/move-function/function_test.cpp
-// RUN: cd %T/move-function
-// RUN: clang-move -names="g" -new_header=%T/move-function/new_function_test.h -old_header=../move-function/function_test.h %T/move-function/function_test.cpp --
-// RUN: FileCheck -input-file=%T/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
+// RUN: mkdir -p %t.dir/move-function
+// RUN: cat %S/Inputs/function_test.h > %t.dir/move-function/function_test.h
+// RUN: cat %S/Inputs/function_test.cpp > %t.dir/move-function/function_test.cpp
+// RUN: cd %t.dir/move-function
+// RUN: clang-move -names="g" -new_header=%t.dir/move-function/new_function_test.h -old_header=../move-function/function_test.h %t.dir/move-function/function_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
//
// CHECK-NEW-TEST-H-CASE1: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE1: #define {{.*}}NEW_FUNCTION_TEST_H
@@ -12,9 +12,9 @@
// CHECK-NEW-TEST-H-CASE1: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE1: #endif // {{.*}}NEW_FUNCTION_TEST_H
//
-// RUN: cp %S/Inputs/function_test* %T/move-function
-// RUN: clang-move -names="h" -new_header=%T/move-function/new_function_test.h -old_header=../move-function/function_test.h %T/move-function/function_test.cpp --
-// RUN: FileCheck -input-file=%T/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
+// RUN: cp %S/Inputs/function_test* %t.dir/move-function
+// RUN: clang-move -names="h" -new_header=%t.dir/move-function/new_function_test.h -old_header=../move-function/function_test.h %t.dir/move-function/function_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
//
// CHECK-NEW-TEST-H-CASE2: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE2: #define {{.*}}NEW_FUNCTION_TEST_H
@@ -25,10 +25,10 @@
// CHECK-NEW-TEST-H-CASE2: {{[[:space:]]+}}
// CHECK-NEW-TEST-H-CASE2: #endif // {{.*}}NEW_FUNCTION_TEST_H
//
-// RUN: cp %S/Inputs/function_test* %T/move-function
-// RUN: clang-move -names="f" -new_header=%T/move-function/new_function_test.h -new_cc=%T/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %T/move-function/function_test.cpp --
-// RUN: FileCheck -input-file=%T/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE3 %s
-// RUN: FileCheck -input-file=%T/move-function/new_function_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE3 %s
+// RUN: cp %S/Inputs/function_test* %t.dir/move-function
+// RUN: clang-move -names="f" -new_header=%t.dir/move-function/new_function_test.h -new_cc=%t.dir/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %t.dir/move-function/function_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE3 %s
+// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE3 %s
//
// CHECK-NEW-TEST-H-CASE3: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE3: #define {{.*}}NEW_FUNCTION_TEST_H
@@ -40,17 +40,17 @@
// CHECK-NEW-TEST-CPP-CASE3: {{[[:space:]]+}}
// CHECK-NEW-TEST-CPP-CASE3: void f() {}
//
-// RUN: cat %S/Inputs/function_test.h > %T/move-function/function_test.h
-// RUN: cat %S/Inputs/function_test.cpp > %T/move-function/function_test.cpp
-// RUN: clang-move -names="A::f" -new_header=%T/move-function/new_function_test.h -new_cc=%T/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %T/move-function/function_test.cpp -dump_result -- | FileCheck %s -check-prefix=CHECK-EMPTY
+// RUN: cat %S/Inputs/function_test.h > %t.dir/move-function/function_test.h
+// RUN: cat %S/Inputs/function_test.cpp > %t.dir/move-function/function_test.cpp
+// RUN: clang-move -names="A::f" -new_header=%t.dir/move-function/new_function_test.h -new_cc=%t.dir/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %t.dir/move-function/function_test.cpp -dump_result -- | FileCheck %s -check-prefix=CHECK-EMPTY
//
// CHECK-EMPTY: [{{[[:space:]]*}}]
//
-// RUN: cat %S/Inputs/function_test.h > %T/move-function/function_test.h
-// RUN: cat %S/Inputs/function_test.cpp > %T/move-function/function_test.cpp
-// RUN: clang-move -names="f,A" -new_header=%T/move-function/new_function_test.h -new_cc=%T/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %T/move-function/function_test.cpp --
-// RUN: FileCheck -input-file=%T/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE4 %s
-// RUN: FileCheck -input-file=%T/move-function/new_function_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE4 %s
+// RUN: cat %S/Inputs/function_test.h > %t.dir/move-function/function_test.h
+// RUN: cat %S/Inputs/function_test.cpp > %t.dir/move-function/function_test.cpp
+// RUN: clang-move -names="f,A" -new_header=%t.dir/move-function/new_function_test.h -new_cc=%t.dir/move-function/new_function_test.cpp -old_header=../move-function/function_test.h -old_cc=../move-function/function_test.cpp %t.dir/move-function/function_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.h -check-prefix=CHECK-NEW-TEST-H-CASE4 %s
+// RUN: FileCheck -input-file=%t.dir/move-function/new_function_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE4 %s
// CHECK-NEW-TEST-H-CASE4: #ifndef {{.*}}NEW_FUNCTION_TEST_H
// CHECK-NEW-TEST-H-CASE4: #define {{.*}}NEW_FUNCTION_TEST_H
diff --git a/clang-tools-extra/test/clang-move/move-multiple-classes.cpp b/clang-tools-extra/test/clang-move/move-multiple-classes.cpp
index 821d567..513b904 100644
--- a/clang-tools-extra/test/clang-move/move-multiple-classes.cpp
+++ b/clang-tools-extra/test/clang-move/move-multiple-classes.cpp
@@ -1,12 +1,12 @@
-// RUN: mkdir -p %T/move-multiple-classes
-// RUN: cp %S/Inputs/multiple_class_test* %T/move-multiple-classes/
-// RUN: cd %T/move-multiple-classes
-// RUN: clang-move -names="c::EnclosingMove5::Nested" -new_cc=%T/move-multiple-classes/new_multiple_class_test.cpp -new_header=%T/move-multiple-classes/new_multiple_class_test.h -old_cc=%T/move-multiple-classes/multiple_class_test.cpp -old_header=../move-multiple-classes/multiple_class_test.h -dump_result %T/move-multiple-classes/multiple_class_test.cpp -- -std=c++11| FileCheck %s -check-prefix=CHECK-EMPTY
-// RUN: clang-move -names="a::Move1, b::Move2,c::Move3,c::Move4,c::EnclosingMove5" -new_cc=%T/move-multiple-classes/new_multiple_class_test.cpp -new_header=%T/move-multiple-classes/new_multiple_class_test.h -old_cc=%T/move-multiple-classes/multiple_class_test.cpp -old_header=../move-multiple-classes/multiple_class_test.h %T/move-multiple-classes/multiple_class_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-multiple-classes/new_multiple_class_test.cpp -check-prefix=CHECK-NEW-TEST-CPP %s
-// RUN: FileCheck -input-file=%T/move-multiple-classes/new_multiple_class_test.h -check-prefix=CHECK-NEW-TEST-H %s
-// RUN: FileCheck -input-file=%T/move-multiple-classes/multiple_class_test.cpp -check-prefix=CHECK-OLD-TEST-CPP %s
-// RUN: FileCheck -input-file=%T/move-multiple-classes/multiple_class_test.h -check-prefix=CHECK-OLD-TEST-H %s
+// RUN: mkdir -p %t.dir/move-multiple-classes
+// RUN: cp %S/Inputs/multiple_class_test* %t.dir/move-multiple-classes/
+// RUN: cd %t.dir/move-multiple-classes
+// RUN: clang-move -names="c::EnclosingMove5::Nested" -new_cc=%t.dir/move-multiple-classes/new_multiple_class_test.cpp -new_header=%t.dir/move-multiple-classes/new_multiple_class_test.h -old_cc=%t.dir/move-multiple-classes/multiple_class_test.cpp -old_header=../move-multiple-classes/multiple_class_test.h -dump_result %t.dir/move-multiple-classes/multiple_class_test.cpp -- -std=c++11| FileCheck %s -check-prefix=CHECK-EMPTY
+// RUN: clang-move -names="a::Move1, b::Move2,c::Move3,c::Move4,c::EnclosingMove5" -new_cc=%t.dir/move-multiple-classes/new_multiple_class_test.cpp -new_header=%t.dir/move-multiple-classes/new_multiple_class_test.h -old_cc=%t.dir/move-multiple-classes/multiple_class_test.cpp -old_header=../move-multiple-classes/multiple_class_test.h %t.dir/move-multiple-classes/multiple_class_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-multiple-classes/new_multiple_class_test.cpp -check-prefix=CHECK-NEW-TEST-CPP %s
+// RUN: FileCheck -input-file=%t.dir/move-multiple-classes/new_multiple_class_test.h -check-prefix=CHECK-NEW-TEST-H %s
+// RUN: FileCheck -input-file=%t.dir/move-multiple-classes/multiple_class_test.cpp -check-prefix=CHECK-OLD-TEST-CPP %s
+// RUN: FileCheck -input-file=%t.dir/move-multiple-classes/multiple_class_test.h -check-prefix=CHECK-OLD-TEST-H %s
//
// CHECK-EMPTY: [{{[[:space:]]*}}]
//
diff --git a/clang-tools-extra/test/clang-move/move-template-class.cpp b/clang-tools-extra/test/clang-move/move-template-class.cpp
index 1a6a60b..29ed65e 100644
--- a/clang-tools-extra/test/clang-move/move-template-class.cpp
+++ b/clang-tools-extra/test/clang-move/move-template-class.cpp
@@ -1,18 +1,18 @@
-// RUN: mkdir -p %T/move-template-class
-// RUN: cp %S/Inputs/template_class_test* %T/move-template-class
-// RUN: cd %T/move-template-class
-// RUN: clang-move -names="A,B" -new_cc=%T/move-template-class/new_template_class_test.cpp -new_header=%T/move-template-class/new_template_class_test.h -old_cc=%T/move-template-class/template_class_test.cpp -old_header=../move-template-class/template_class_test.h %T/move-template-class/template_class_test.cpp --
-// RUN: FileCheck -input-file=%T/move-template-class/template_class_test.cpp -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
-// RUN: FileCheck -input-file=%T/move-template-class/template_class_test.h -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
-// RUN: FileCheck -input-file=%T/move-template-class/new_template_class_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE1 %s
-// RUN: FileCheck -input-file=%T/move-template-class/new_template_class_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
+// RUN: mkdir -p %t.dir/move-template-class
+// RUN: cp %S/Inputs/template_class_test* %t.dir/move-template-class
+// RUN: cd %t.dir/move-template-class
+// RUN: clang-move -names="A,B" -new_cc=%t.dir/move-template-class/new_template_class_test.cpp -new_header=%t.dir/move-template-class/new_template_class_test.h -old_cc=%t.dir/move-template-class/template_class_test.cpp -old_header=../move-template-class/template_class_test.h %t.dir/move-template-class/template_class_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-template-class/template_class_test.cpp -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
+// RUN: FileCheck -input-file=%t.dir/move-template-class/template_class_test.h -check-prefix=CHECK-OLD-TEST-EMPTY -allow-empty %s
+// RUN: FileCheck -input-file=%t.dir/move-template-class/new_template_class_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE1 %s
+// RUN: FileCheck -input-file=%t.dir/move-template-class/new_template_class_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
//
-// RUN: cp %S/Inputs/template_class_test* %T/move-template-class
-// RUN: clang-move -names="A" -new_cc=%T/move-template-class/new_template_class_test.cpp -new_header=%T/move-template-class/new_template_class_test.h -old_cc=%T/move-template-class/template_class_test.cpp -old_header=../move-template-class/template_class_test.h %T/move-template-class/template_class_test.cpp --
-// RUN: FileCheck -input-file=%T/move-template-class/template_class_test.h -check-prefix=CHECK-OLD-TEST-H-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-template-class/template_class_test.cpp -check-prefix=CHECK-OLD-TEST-CPP-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-template-class/new_template_class_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-template-class/new_template_class_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE2 %s
+// RUN: cp %S/Inputs/template_class_test* %t.dir/move-template-class
+// RUN: clang-move -names="A" -new_cc=%t.dir/move-template-class/new_template_class_test.cpp -new_header=%t.dir/move-template-class/new_template_class_test.h -old_cc=%t.dir/move-template-class/template_class_test.cpp -old_header=../move-template-class/template_class_test.h %t.dir/move-template-class/template_class_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-template-class/template_class_test.h -check-prefix=CHECK-OLD-TEST-H-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-template-class/template_class_test.cpp -check-prefix=CHECK-OLD-TEST-CPP-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-template-class/new_template_class_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-template-class/new_template_class_test.cpp -check-prefix=CHECK-NEW-TEST-CPP-CASE2 %s
//
//
// CHECK-OLD-TEST-EMPTY: {{^}}{{$}}
diff --git a/clang-tools-extra/test/clang-move/move-type-alias.cpp b/clang-tools-extra/test/clang-move/move-type-alias.cpp
index ab70237..54d2b0e 100644
--- a/clang-tools-extra/test/clang-move/move-type-alias.cpp
+++ b/clang-tools-extra/test/clang-move/move-type-alias.cpp
@@ -1,14 +1,14 @@
-// RUN: mkdir -p %T/move-type-alias
-// RUN: cp %S/Inputs/type_alias.h %T/move-type-alias/type_alias.h
-// RUN: echo '#include "type_alias.h"' > %T/move-type-alias/type_alias.cpp
-// RUN: cd %T/move-type-alias
+// RUN: mkdir -p %t.dir/move-type-alias
+// RUN: cp %S/Inputs/type_alias.h %t.dir/move-type-alias/type_alias.h
+// RUN: echo '#include "type_alias.h"' > %t.dir/move-type-alias/type_alias.cpp
+// RUN: cd %t.dir/move-type-alias
//
// -----------------------------------------------------------------------------
// Test moving typedef declarations.
// -----------------------------------------------------------------------------
-// RUN: clang-move -names="Int1" -new_cc=%T/move-type-alias/new_test.cpp -new_header=%T/move-type-alias/new_test.h -old_cc=%T/move-type-alias/type_alias.cpp -old_header=%T/move-type-alias/type_alias.h %T/move-type-alias/type_alias.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-type-alias/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
-// RUN: FileCheck -input-file=%T/move-type-alias/type_alias.h -check-prefix=CHECK-OLD-TEST-H-CASE1 %s
+// RUN: clang-move -names="Int1" -new_cc=%t.dir/move-type-alias/new_test.cpp -new_header=%t.dir/move-type-alias/new_test.h -old_cc=%t.dir/move-type-alias/type_alias.cpp -old_header=%t.dir/move-type-alias/type_alias.h %t.dir/move-type-alias/type_alias.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-type-alias/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE1 %s
+// RUN: FileCheck -input-file=%t.dir/move-type-alias/type_alias.h -check-prefix=CHECK-OLD-TEST-H-CASE1 %s
// CHECK-NEW-TEST-H-CASE1: typedef int Int1;
@@ -18,11 +18,11 @@
// -----------------------------------------------------------------------------
// Test moving type alias declarations.
// -----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/type_alias.h %T/move-type-alias/type_alias.h
-// RUN: echo '#include "type_alias.h"' > %T/move-type-alias/type_alias.cpp
-// RUN: clang-move -names="Int2" -new_cc=%T/move-type-alias/new_test.cpp -new_header=%T/move-type-alias/new_test.h -old_cc=%T/move-type-alias/type_alias.cpp -old_header=%T/move-type-alias/type_alias.h %T/move-type-alias/type_alias.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-type-alias/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-type-alias/type_alias.h -check-prefix=CHECK-OLD-TEST-H-CASE2 %s
+// RUN: cp %S/Inputs/type_alias.h %t.dir/move-type-alias/type_alias.h
+// RUN: echo '#include "type_alias.h"' > %t.dir/move-type-alias/type_alias.cpp
+// RUN: clang-move -names="Int2" -new_cc=%t.dir/move-type-alias/new_test.cpp -new_header=%t.dir/move-type-alias/new_test.h -old_cc=%t.dir/move-type-alias/type_alias.cpp -old_header=%t.dir/move-type-alias/type_alias.h %t.dir/move-type-alias/type_alias.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-type-alias/new_test.h -check-prefix=CHECK-NEW-TEST-H-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-type-alias/type_alias.h -check-prefix=CHECK-OLD-TEST-H-CASE2 %s
// CHECK-NEW-TEST-H-CASE2: using Int2 = int;
@@ -32,10 +32,10 @@
// -----------------------------------------------------------------------------
// Test moving template type alias declarations.
// -----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/type_alias.h %T/move-type-alias/type_alias.h
-// RUN: echo '#include "type_alias.h"' > %T/move-type-alias/type_alias.cpp
-// RUN: clang-move -names="B" -new_cc=%T/move-type-alias/new_test.cpp -new_header=%T/move-type-alias/new_test.h -old_cc=%T/move-type-alias/type_alias.cpp -old_header=%T/move-type-alias/type_alias.h %T/move-type-alias/type_alias.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-type-alias/new_test.h -check-prefix=CHECK-OLD-TEST-H-CASE3 %s
+// RUN: cp %S/Inputs/type_alias.h %t.dir/move-type-alias/type_alias.h
+// RUN: echo '#include "type_alias.h"' > %t.dir/move-type-alias/type_alias.cpp
+// RUN: clang-move -names="B" -new_cc=%t.dir/move-type-alias/new_test.cpp -new_header=%t.dir/move-type-alias/new_test.h -old_cc=%t.dir/move-type-alias/type_alias.cpp -old_header=%t.dir/move-type-alias/type_alias.h %t.dir/move-type-alias/type_alias.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-type-alias/new_test.h -check-prefix=CHECK-OLD-TEST-H-CASE3 %s
// CHECK-NEW-TEST-H-CASE3: template<class T> using B = A<T>;
// CHECK-OLD-TEST-H-CASE3-NOT: template<class T> using B = A<T>;
@@ -44,9 +44,9 @@
// -----------------------------------------------------------------------------
// Test not moving typedef declarations nested inside a class.
// -----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/type_alias.h %T/move-type-alias/type_alias.h
-// RUN: echo '#include "type_alias.h"' > %T/move-type-alias/type_alias.cpp
-// RUN: clang-move -names="C::Int3" -new_cc=%T/move-type-alias/new_test.cpp -new_header=%T/move-type-alias/new_test.h -old_cc=%T/move-type-alias/type_alias.cpp -old_header=%T/move-type-alias/type_alias.h %T/move-type-alias/type_alias.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/move-type-alias/new_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
+// RUN: cp %S/Inputs/type_alias.h %t.dir/move-type-alias/type_alias.h
+// RUN: echo '#include "type_alias.h"' > %t.dir/move-type-alias/type_alias.cpp
+// RUN: clang-move -names="C::Int3" -new_cc=%t.dir/move-type-alias/new_test.cpp -new_header=%t.dir/move-type-alias/new_test.h -old_cc=%t.dir/move-type-alias/type_alias.cpp -old_header=%t.dir/move-type-alias/type_alias.h %t.dir/move-type-alias/type_alias.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/move-type-alias/new_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
// CHECK-EMPTY: {{^}}{{$}}
diff --git a/clang-tools-extra/test/clang-move/move-used-helper-decls.cpp b/clang-tools-extra/test/clang-move/move-used-helper-decls.cpp
index b4aed2c..3092976 100644
--- a/clang-tools-extra/test/clang-move/move-used-helper-decls.cpp
+++ b/clang-tools-extra/test/clang-move/move-used-helper-decls.cpp
@@ -1,13 +1,13 @@
-// RUN: mkdir -p %T/used-helper-decls
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: cd %T/used-helper-decls
+// RUN: mkdir -p %t.dir/used-helper-decls
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: cd %t.dir/used-helper-decls
// ----------------------------------------------------------------------------
// Test moving used helper function and its transitively used functions.
// ----------------------------------------------------------------------------
-// RUN: clang-move -names="a::Class1" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS1-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS1-CPP %s
+// RUN: clang-move -names="a::Class1" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS1-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS1-CPP %s
// CHECK-NEW-CLASS1-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-CLASS1-CPP-SAME: {{[[:space:]]}}
@@ -31,10 +31,10 @@
// ----------------------------------------------------------------------------
// Test moving used helper function and its transitively used static variables.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Class2" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS2-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS2-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Class2" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS2-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS2-CPP %s
// CHECK-NEW-CLASS2-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-CLASS2-CPP-SAME: {{[[:space:]]}}
@@ -67,10 +67,10 @@
// ----------------------------------------------------------------------------
// Test using a static member variable of a helper class.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Class3" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS3-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS3-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Class3" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS3-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS3-CPP %s
// CHECK-NEW-CLASS3-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-CLASS3-CPP-SAME: {{[[:space:]]}}
@@ -99,10 +99,10 @@
// ----------------------------------------------------------------------------
// Test moving helper classes.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Class4" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS4-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS4-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Class4" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS4-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS4-CPP %s
// CHECK-NEW-CLASS4-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-CLASS4-CPP-SAME: {{[[:space:]]}}
@@ -120,10 +120,10 @@
// ----------------------------------------------------------------------------
// Test moving helper variables and helper functions together.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Class5" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS5-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS5-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Class5" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS5-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS5-CPP %s
// CHECK-NEW-CLASS5-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-CLASS5-CPP-SAME: {{[[:space:]]}}
@@ -153,10 +153,10 @@
// ----------------------------------------------------------------------------
// Test moving helper variables and their transitively used helper classes.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Class6" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS6-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS6-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Class6" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS6-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS6-CPP %s
// CHECK-NEW-CLASS6-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-CLASS6-CPP-SAME: {{[[:space:]]}}
@@ -186,10 +186,10 @@
// ----------------------------------------------------------------------------
// Test moving classes whose methods use helpers.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Class7" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS7-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS7-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Class7" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CLASS7-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-CLASS7-CPP %s
// CHECK-NEW-CLASS7-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-CLASS7-CPP-SAME: {{[[:space:]]}}
@@ -218,10 +218,10 @@
// ----------------------------------------------------------------------------
// Test moving helper function and its transitively used helper variables.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Fun1" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-FUN1-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-FUN1-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Fun1" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-FUN1-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-FUN1-CPP %s
// CHECK-NEW-FUN1-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-FUN1-CPP-SAME: {{[[:space:]]}}
@@ -244,11 +244,11 @@
// ----------------------------------------------------------------------------
// Test not moving helpers when moving inline functions defined in the header.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Fun2" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-FUN2-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.h -check-prefix=CHECK-NEW-FUN2-H %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.h -check-prefix=CHECK-OLD-FUN2-H %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Fun2" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-FUN2-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.h -check-prefix=CHECK-NEW-FUN2-H %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.h -check-prefix=CHECK-OLD-FUN2-H %s
// CHECK-NEW-FUN2-H: namespace a {
// CHECK-NEW-FUN2-H-NEXT: inline void Fun2() {}
@@ -262,10 +262,10 @@
// ----------------------------------------------------------------------------
// Test moving used helper function and its transitively used functions.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="b::Fun3" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-FUN3-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-FUN3-CPP %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="b::Fun3" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-FUN3-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -check-prefix=CHECK-OLD-FUN3-CPP %s
// CHECK-NEW-FUN3-CPP: #include "{{.*}}new_helper_decls_test.h"
// CHECK-NEW-FUN3-CPP-SAME: {{[[:space:]]}}
@@ -294,12 +294,12 @@
// ----------------------------------------------------------------------------
// Test moving all symbols in headers.
// ----------------------------------------------------------------------------
-// RUN: cp %S/Inputs/helper_decls_test* %T/used-helper-decls/
-// RUN: clang-move -names="a::Class1, a::Class2, a::Class3, a::Class4, a::Class5, a::Class5, a::Class6, a::Class7, a::Fun1, a::Fun2, b::Fun3" -new_cc=%T/used-helper-decls/new_helper_decls_test.cpp -new_header=%T/used-helper-decls/new_helper_decls_test.h -old_cc=%T/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %T/used-helper-decls/helper_decls_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.h -check-prefix=CHECK-NEW-H %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CPP %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
-// RUN: FileCheck -input-file=%T/used-helper-decls/helper_decls_test.cpp -allow-empty -check-prefix=CHECK-EMPTY %s
+// RUN: cp %S/Inputs/helper_decls_test* %t.dir/used-helper-decls/
+// RUN: clang-move -names="a::Class1, a::Class2, a::Class3, a::Class4, a::Class5, a::Class5, a::Class6, a::Class7, a::Fun1, a::Fun2, b::Fun3" -new_cc=%t.dir/used-helper-decls/new_helper_decls_test.cpp -new_header=%t.dir/used-helper-decls/new_helper_decls_test.h -old_cc=%t.dir/used-helper-decls/helper_decls_test.cpp -old_header=../used-helper-decls/helper_decls_test.h %t.dir/used-helper-decls/helper_decls_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.h -check-prefix=CHECK-NEW-H %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/new_helper_decls_test.cpp -check-prefix=CHECK-NEW-CPP %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
+// RUN: FileCheck -input-file=%t.dir/used-helper-decls/helper_decls_test.cpp -allow-empty -check-prefix=CHECK-EMPTY %s
// CHECK-NEW-H: namespace a {
diff --git a/clang-tools-extra/test/clang-move/move-var.cpp b/clang-tools-extra/test/clang-move/move-var.cpp
index 4a3554c..bacba0b 100644
--- a/clang-tools-extra/test/clang-move/move-var.cpp
+++ b/clang-tools-extra/test/clang-move/move-var.cpp
@@ -1,11 +1,11 @@
-// RUN: mkdir -p %T/move-var
-// RUN: cp %S/Inputs/var_test* %T/move-var
-// RUN: cd %T/move-var
-// RUN: clang-move -names="a::kGlobalInt" -new_header=%T/move-var/new_var_test.h -old_header=../move-var/var_test.h -old_cc=../move-var/var_test.cpp -new_cc=%T/move-var/new_var_test.cpp %T/move-var/var_test.cpp --
-// RUN: FileCheck -input-file=%T/move-var/var_test.h -check-prefix=CHECK-OLD-VAR-H-CASE1 %s
-// RUN: FileCheck -input-file=%T/move-var/var_test.cpp -check-prefix=CHECK-OLD-VAR-CPP-CASE1 %s
-// RUN: FileCheck -input-file=%T/move-var/new_var_test.h -check-prefix=CHECK-NEW-VAR-H-CASE1 %s
-// RUN: FileCheck -input-file=%T/move-var/new_var_test.cpp -check-prefix=CHECK-NEW-VAR-CPP-CASE1 %s
+// RUN: mkdir -p %t.dir/move-var
+// RUN: cp %S/Inputs/var_test* %t.dir/move-var
+// RUN: cd %t.dir/move-var
+// RUN: clang-move -names="a::kGlobalInt" -new_header=%t.dir/move-var/new_var_test.h -old_header=../move-var/var_test.h -old_cc=../move-var/var_test.cpp -new_cc=%t.dir/move-var/new_var_test.cpp %t.dir/move-var/var_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-var/var_test.h -check-prefix=CHECK-OLD-VAR-H-CASE1 %s
+// RUN: FileCheck -input-file=%t.dir/move-var/var_test.cpp -check-prefix=CHECK-OLD-VAR-CPP-CASE1 %s
+// RUN: FileCheck -input-file=%t.dir/move-var/new_var_test.h -check-prefix=CHECK-NEW-VAR-H-CASE1 %s
+// RUN: FileCheck -input-file=%t.dir/move-var/new_var_test.cpp -check-prefix=CHECK-NEW-VAR-CPP-CASE1 %s
// CHECK-OLD-VAR-H-CASE1-NOT: extern int kGlobalInt;
// CHECK-OLD-VAR-H-CASE1: int kGlobalInt = 3;
@@ -18,12 +18,12 @@
// CHECK-NEW-VAR-CPP-CASE1: int kGlobalInt = 1;
-// RUN: cp %S/Inputs/var_test* %T/move-var
-// RUN: clang-move -names="a::kGlobalStr" -new_header=%T/move-var/new_var_test.h -old_header=../move-var/var_test.h -old_cc=../move-var/var_test.cpp -new_cc=%T/move-var/new_var_test.cpp %T/move-var/var_test.cpp --
-// RUN: FileCheck -input-file=%T/move-var/var_test.h -check-prefix=CHECK-OLD-VAR-H-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-var/var_test.cpp -check-prefix=CHECK-OLD-VAR-CPP-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-var/new_var_test.h -check-prefix=CHECK-NEW-VAR-H-CASE2 %s
-// RUN: FileCheck -input-file=%T/move-var/new_var_test.cpp -check-prefix=CHECK-NEW-VAR-CPP-CASE2 %s
+// RUN: cp %S/Inputs/var_test* %t.dir/move-var
+// RUN: clang-move -names="a::kGlobalStr" -new_header=%t.dir/move-var/new_var_test.h -old_header=../move-var/var_test.h -old_cc=../move-var/var_test.cpp -new_cc=%t.dir/move-var/new_var_test.cpp %t.dir/move-var/var_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-var/var_test.h -check-prefix=CHECK-OLD-VAR-H-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-var/var_test.cpp -check-prefix=CHECK-OLD-VAR-CPP-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-var/new_var_test.h -check-prefix=CHECK-NEW-VAR-H-CASE2 %s
+// RUN: FileCheck -input-file=%t.dir/move-var/new_var_test.cpp -check-prefix=CHECK-NEW-VAR-CPP-CASE2 %s
// CHECK-OLD-VAR-H-CASE2-NOT: extern const char *const kGlobalStr;
// CHECK-OLD-VAR-H-CASE2: const char *const kGlobalStr = "Hello2";
@@ -36,10 +36,10 @@
// CHECK-NEW-VAR-CPP-CASE2: const char *const kGlobalStr = "Hello";
-// RUN: cp %S/Inputs/var_test* %T/move-var
-// RUN: clang-move -names="kEvilInt" -new_header=%T/move-var/new_var_test.h -old_header=../move-var/var_test.h -old_cc=../move-var/var_test.cpp -new_cc=%T/move-var/new_var_test.cpp %T/move-var/var_test.cpp --
-// RUN: FileCheck -input-file=%T/move-var/var_test.h -check-prefix=CHECK-OLD-VAR-H-CASE3 %s
-// RUN: FileCheck -input-file=%T/move-var/new_var_test.h -check-prefix=CHECK-NEW-VAR-H-CASE3 %s
+// RUN: cp %S/Inputs/var_test* %t.dir/move-var
+// RUN: clang-move -names="kEvilInt" -new_header=%t.dir/move-var/new_var_test.h -old_header=../move-var/var_test.h -old_cc=../move-var/var_test.cpp -new_cc=%t.dir/move-var/new_var_test.cpp %t.dir/move-var/var_test.cpp --
+// RUN: FileCheck -input-file=%t.dir/move-var/var_test.h -check-prefix=CHECK-OLD-VAR-H-CASE3 %s
+// RUN: FileCheck -input-file=%t.dir/move-var/new_var_test.h -check-prefix=CHECK-NEW-VAR-H-CASE3 %s
// CHECK-OLD-VAR-H-CASE3-NOT: int kEvilInt = 2;
diff --git a/clang-tools-extra/test/clang-move/no-move-macro-helpers.cpp b/clang-tools-extra/test/clang-move/no-move-macro-helpers.cpp
index 282eee0..e7a5b4e 100644
--- a/clang-tools-extra/test/clang-move/no-move-macro-helpers.cpp
+++ b/clang-tools-extra/test/clang-move/no-move-macro-helpers.cpp
@@ -1,16 +1,16 @@
-// RUN: mkdir -p %T/no-move-macro-helper
-// RUN: cat %S/Inputs/macro_helper_test.h > %T/no-move-macro-helper/macro_helper_test.h
-// RUN: cat %S/Inputs/macro_helper_test.cpp > %T/no-move-macro-helper/macro_helper_test.cpp
-// RUN: cd %T/no-move-macro-helper
+// RUN: mkdir -p %t.dir/no-move-macro-helper
+// RUN: cat %S/Inputs/macro_helper_test.h > %t.dir/no-move-macro-helper/macro_helper_test.h
+// RUN: cat %S/Inputs/macro_helper_test.cpp > %t.dir/no-move-macro-helper/macro_helper_test.cpp
+// RUN: cd %t.dir/no-move-macro-helper
//
// -----------------------------------------------------------------------------
// Test no moving helpers in macro.
// -----------------------------------------------------------------------------
-// RUN: clang-move -names="A" -new_cc=%T/no-move-macro-helper/new_test.cpp -new_header=%T/no-move-macro-helper/new_test.h -old_cc=%T/no-move-macro-helper/macro_helper_test.cpp -old_header=%T/no-move-macro-helper/macro_helper_test.h %T/no-move-macro-helper/macro_helper_test.cpp -- -std=c++11
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/new_test.h -check-prefix=CHECK-NEW-TEST-CASE1-H %s
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/new_test.cpp -check-prefix=CHECK-NEW-TEST-CASE1-CPP %s
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/macro_helper_test.h -check-prefix=CHECK-OLD-TEST-CASE1-H %s
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/macro_helper_test.cpp -check-prefix=CHECK-OLD-TEST-CASE1-CPP %s
+// RUN: clang-move -names="A" -new_cc=%t.dir/no-move-macro-helper/new_test.cpp -new_header=%t.dir/no-move-macro-helper/new_test.h -old_cc=%t.dir/no-move-macro-helper/macro_helper_test.cpp -old_header=%t.dir/no-move-macro-helper/macro_helper_test.h %t.dir/no-move-macro-helper/macro_helper_test.cpp -- -std=c++11
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/new_test.h -check-prefix=CHECK-NEW-TEST-CASE1-H %s
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/new_test.cpp -check-prefix=CHECK-NEW-TEST-CASE1-CPP %s
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/macro_helper_test.h -check-prefix=CHECK-OLD-TEST-CASE1-H %s
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/macro_helper_test.cpp -check-prefix=CHECK-OLD-TEST-CASE1-CPP %s
// CHECK-NEW-TEST-CASE1-H: class A {};
@@ -24,14 +24,14 @@
// -----------------------------------------------------------------------------
// Test moving all.
// -----------------------------------------------------------------------------
-// RUN: cat %S/Inputs/macro_helper_test.h > %T/no-move-macro-helper/macro_helper_test.h
-// RUN: cat %S/Inputs/macro_helper_test.cpp > %T/no-move-macro-helper/macro_helper_test.cpp
-// RUN: clang-move -names="A, f1" -new_cc=%T/no-move-macro-helper/new_test.cpp -new_header=%T/no-move-macro-helper/new_test.h -old_cc=%T/no-move-macro-helper/macro_helper_test.cpp -old_header=%T/no-move-macro-helper/macro_helper_test.h %T/no-move-macro-helper/macro_helper_test.cpp -- -std=c++11
+// RUN: cat %S/Inputs/macro_helper_test.h > %t.dir/no-move-macro-helper/macro_helper_test.h
+// RUN: cat %S/Inputs/macro_helper_test.cpp > %t.dir/no-move-macro-helper/macro_helper_test.cpp
+// RUN: clang-move -names="A, f1" -new_cc=%t.dir/no-move-macro-helper/new_test.cpp -new_header=%t.dir/no-move-macro-helper/new_test.h -old_cc=%t.dir/no-move-macro-helper/macro_helper_test.cpp -old_header=%t.dir/no-move-macro-helper/macro_helper_test.h %t.dir/no-move-macro-helper/macro_helper_test.cpp -- -std=c++11
//
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/new_test.h -check-prefix=CHECK-NEW-TEST-CASE2-H %s
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/new_test.cpp -check-prefix=CHECK-NEW-TEST-CASE2-CPP %s
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/macro_helper_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
-// RUN: FileCheck -input-file=%T/no-move-macro-helper/macro_helper_test.cpp -allow-empty -check-prefix=CHECK-EMPTY %s
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/new_test.h -check-prefix=CHECK-NEW-TEST-CASE2-H %s
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/new_test.cpp -check-prefix=CHECK-NEW-TEST-CASE2-CPP %s
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/macro_helper_test.h -allow-empty -check-prefix=CHECK-EMPTY %s
+// RUN: FileCheck -input-file=%t.dir/no-move-macro-helper/macro_helper_test.cpp -allow-empty -check-prefix=CHECK-EMPTY %s
// CHECK-NEW-TEST-CASE2-H: class A {};
// CHECK-NEW-TEST-CASE2-H-NEXT:void f1();
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.c b/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.c
new file mode 100644
index 0000000..55f5884
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.c
@@ -0,0 +1,54 @@
+// RUN: %check_clang_tidy %s bugprone-invalid-enum-default-initialization %t
+
+enum Enum1 {
+ Enum1_A = 1,
+ Enum1_B
+};
+
+struct Struct1 {
+ int a;
+ enum Enum1 b;
+};
+
+struct Struct2 {
+ struct Struct1 a;
+ char b;
+};
+
+enum Enum1 E1 = {};
+// CHECK-NOTES: :[[@LINE-1]]:17: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+enum Enum1 E2[10] = {};
+// CHECK-NOTES: :[[@LINE-1]]:21: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+enum Enum1 E3[10] = {Enum1_A};
+// CHECK-NOTES: :[[@LINE-1]]:21: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+enum Enum1 E4[2][2] = {{Enum1_A}, {Enum1_A}};
+// CHECK-NOTES: :[[@LINE-1]]:24: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+// CHECK-NOTES: :[[@LINE-3]]:35: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+enum Enum1 E5[2][2] = {{Enum1_A, Enum1_A}};
+// CHECK-NOTES: :[[@LINE-1]]:23: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+
+
+struct Struct1 S1[2][2] = {{{1, Enum1_A}, {2, Enum1_A}}};
+// CHECK-NOTES: :[[@LINE-1]]:27: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+
+struct Struct2 S2[3] = {{1}};
+// CHECK-NOTES: :[[@LINE-1]]:24: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+// CHECK-NOTES: :[[@LINE-3]]:26: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :3:6: note: enum is defined here
+
+union Union1 {
+ enum Enum1 a;
+ int b;
+};
+
+// no warnings for union
+union Union1 U1 = {};
+union Union1 U2[3] = {};
diff --git a/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp b/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp
new file mode 100644
index 0000000..eb3d563
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/bugprone/invalid-enum-default-initialization.cpp
@@ -0,0 +1,145 @@
+// RUN: %check_clang_tidy -std=c++17 %s bugprone-invalid-enum-default-initialization %t
+
+enum class Enum0: int {
+ A = 0,
+ B
+};
+
+enum class Enum1: int {
+ A = 1,
+ B
+};
+
+enum Enum2 {
+ Enum_A = 4,
+ Enum_B
+};
+
+Enum0 E0_1{};
+Enum0 E0_2 = Enum0();
+Enum0 E0_3;
+Enum0 E0_4{0};
+Enum0 E0_5{Enum0::A};
+Enum0 E0_6{Enum0::B};
+
+Enum1 E1_1{};
+// CHECK-NOTES: :[[@LINE-1]]:11: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :8:12: note: enum is defined here
+Enum1 E1_2 = Enum1();
+// CHECK-NOTES: :[[@LINE-1]]:14: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :8:12: note: enum is defined here
+Enum1 E1_3;
+Enum1 E1_4{0};
+Enum1 E1_5{Enum1::A};
+Enum1 E1_6{Enum1::B};
+
+Enum2 E2_1{};
+// CHECK-NOTES: :[[@LINE-1]]:11: warning: enum value of type 'Enum2' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :13:6: note: enum is defined here
+Enum2 E2_2 = Enum2();
+// CHECK-NOTES: :[[@LINE-1]]:14: warning: enum value of type 'Enum2' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+// CHECK-NOTES: :13:6: note: enum is defined here
+
+void f1() {
+ static Enum1 S; // FIXME: warn for this?
+ Enum1 A;
+ Enum1 B = Enum1();
+ // CHECK-NOTES: :[[@LINE-1]]:13: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ int C = int();
+}
+
+void f2() {
+ Enum1 A{};
+ // CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ Enum1 B = Enum1();
+ // CHECK-NOTES: :[[@LINE-1]]:13: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ Enum1 C[5] = {{}};
+ // CHECK-NOTES: :[[@LINE-1]]:16: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ // CHECK-NOTES: :[[@LINE-3]]:17: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ Enum1 D[5] = {}; // FIXME: warn for this?
+ // CHECK-NOTES: :[[@LINE-1]]:16: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+}
+
+struct S1 {
+ Enum1 E_1{};
+ // CHECK-NOTES: :[[@LINE-1]]:12: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ Enum1 E_2 = Enum1();
+ // CHECK-NOTES: :[[@LINE-1]]:15: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ Enum1 E_3;
+ Enum1 E_4;
+ Enum1 E_5;
+
+ S1() :
+ E_3{},
+ // CHECK-NOTES: :[[@LINE-1]]:8: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ E_4(),
+ // CHECK-NOTES: :[[@LINE-1]]:8: warning: enum value of type 'Enum1' initialized with invalid value of 0, enum doesn't have a zero-value enumerator
+ // CHECK-NOTES: :8:12: note: enum is defined here
+ E_5{Enum1::B}
+ {}
+};
+
+struct S2 {
+ Enum0 X;
+ Enum1 Y;
+ Enum2 Z;
+};
+
+struct S3 {
+ S2 X;
+ int Y;
+};
+
+struct S4 : public S3 {
+ int Z;
+};
+
+struct S5 {
+ S2 X[3];
+ int Y;
+};
+
+S2 VarS2{};
+// CHECK-NOTES: :[[@LINE-1]]:9: warning: enum value of type 'Enum1' initialized with invalid value of 0
+// CHECK-NOTES: :8:12: note: enum is defined here
+// CHECK-NOTES: :[[@LINE-3]]:9: warning: enum value of type 'Enum2' initialized with invalid value of 0
+// CHECK-NOTES: :13:6: note: enum is defined here
+S3 VarS3{};
+// CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0
+// CHECK-NOTES: :8:12: note: enum is defined here
+// CHECK-NOTES: :[[@LINE-3]]:10: warning: enum value of type 'Enum2' initialized with invalid value of 0
+// CHECK-NOTES: :13:6: note: enum is defined here
+S4 VarS4{};
+// CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0
+// CHECK-NOTES: :8:12: note: enum is defined here
+// CHECK-NOTES: :[[@LINE-3]]:10: warning: enum value of type 'Enum2' initialized with invalid value of 0
+// CHECK-NOTES: :13:6: note: enum is defined here
+S5 VarS5{};
+// CHECK-NOTES: :[[@LINE-1]]:10: warning: enum value of type 'Enum1' initialized with invalid value of 0
+// CHECK-NOTES: :8:12: note: enum is defined here
+
+enum class EnumFwd;
+
+EnumFwd Fwd{};
+
+enum class EnumEmpty {};
+
+EnumEmpty Empty{};
+
+template<typename T>
+struct Templ {
+ T Mem1{};
+ // CHECK-NOTES: :[[@LINE-1]]:9: warning: enum value of type 'Enum1' initialized with invalid value of 0
+ // CHECK-NOTES: :8:12: note: enum is defined here
+};
+
+Templ<Enum1> TemplVar;
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/header-include-cycle.cpp b/clang-tools-extra/test/clang-tidy/checkers/misc/header-include-cycle.cpp
index d3c71ad..3694bdd 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/misc/header-include-cycle.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/header-include-cycle.cpp
@@ -1,15 +1,15 @@
-// RUN: rm -rf %T/misc-header-include-cycle-headers
-// RUN: mkdir %T/misc-header-include-cycle-headers
-// RUN: cp -r %S/Inputs/header-include-cycle* %T/misc-header-include-cycle-headers/
-// RUN: mkdir %T/misc-header-include-cycle-headers/system
-// RUN: cp -r %S/Inputs/system/header-include-cycle* %T/misc-header-include-cycle-headers/system
-// RUN: cp %s %T/header-include-cycle.cpp
-// RUN: clang-tidy %T%{fs-sep}header-include-cycle.cpp -checks='-*,misc-header-include-cycle' -header-filter=.* \
+// RUN: rm -rf %t.dir/misc-header-include-cycle-headers
+// RUN: mkdir -p %t.dir/misc-header-include-cycle-headers
+// RUN: cp -r %S/Inputs/header-include-cycle* %t.dir/misc-header-include-cycle-headers/
+// RUN: mkdir %t.dir/misc-header-include-cycle-headers/system
+// RUN: cp -r %S/Inputs/system/header-include-cycle* %t.dir/misc-header-include-cycle-headers/system
+// RUN: cp %s %t.dir/header-include-cycle.cpp
+// RUN: clang-tidy %t.dir%{fs-sep}header-include-cycle.cpp -checks='-*,misc-header-include-cycle' -header-filter=.* \
// RUN: -config="{CheckOptions: {misc-header-include-cycle.IgnoredFilesList: 'header-include-cycle.self-e.hpp'}}" \
-// RUN: -- -I%T%{fs-sep}misc-header-include-cycle-headers -isystem %T%{fs-sep}misc-header-include-cycle-headers%{fs-sep}system \
-// RUN: --include %T%{fs-sep}misc-header-include-cycle-headers%{fs-sep}header-include-cycle.self-i.hpp | FileCheck %s \
+// RUN: -- -I%t.dir%{fs-sep}misc-header-include-cycle-headers -isystem %t.dir%{fs-sep}misc-header-include-cycle-headers%{fs-sep}system \
+// RUN: --include %t.dir%{fs-sep}misc-header-include-cycle-headers%{fs-sep}header-include-cycle.self-i.hpp | FileCheck %s \
// RUN: -check-prefix=CHECK-MESSAGES "-implicit-check-not={{note|warning|error}}:" --dump-input=fail
-// RUN: rm -rf %T/misc-header-include-cycle-headers
+// RUN: rm -rf %t.dir/misc-header-include-cycle-headers
#ifndef MAIN_GUARD
#define MAIN_GUARD
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/unused-parameters.cpp b/clang-tools-extra/test/clang-tidy/checkers/misc/unused-parameters.cpp
index f1918e9..c963cb5 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/misc/unused-parameters.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/unused-parameters.cpp
@@ -1,11 +1,11 @@
-// RUN: echo "static void staticFunctionHeader(int i) {;}" > %T/header.h
-// RUN: echo "static void staticFunctionHeader(int /*i*/) {;}" > %T/header-fixed.h
-// RUN: %check_clang_tidy --match-partial-fixes -std=c++11 %s misc-unused-parameters %t -- -header-filter='.*' -- -fno-delayed-template-parsing
-// RUN: diff %T/header.h %T/header-fixed.h
+// RUN: mkdir -p %t.dir
+// RUN: echo "static void staticFunctionHeader(int i) {;}" > %t.dir/header.h
+// RUN: echo "static void staticFunctionHeader(int /*i*/) {;}" > %t.dir/header-fixed.h
+// RUN: %check_clang_tidy --match-partial-fixes -std=c++11 %s misc-unused-parameters %t.dir/code -- -header-filter='.*' -- -fno-delayed-template-parsing
+// RUN: diff %t.dir/header.h %t.dir/header-fixed.h
// FIXME: Make the test work in all language modes.
#include "header.h"
-// CHECK-MESSAGES: header.h:1:38: warning
// Basic removal
// =============
@@ -306,3 +306,5 @@ void test() {
// Do not warn on naked functions.
[[gnu::naked]] int nakedFunction(int a, float b, const char *c) { ; }
__attribute__((naked)) void nakedFunction(int a, int b) { ; }
+
+// CHECK-MESSAGES: header.h:1:38: warning
diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/concat-nested-namespaces.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/concat-nested-namespaces.cpp
index a4f50dd..78adbeb 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/modernize/concat-nested-namespaces.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/concat-nested-namespaces.cpp
@@ -1,10 +1,11 @@
-// RUN: cp %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h %T/modernize-concat-nested-namespaces.h
-// RUN: %check_clang_tidy --match-partial-fixes -std=c++17 -check-suffix=NORMAL %s modernize-concat-nested-namespaces %t -- -header-filter=".*" -- -I %T
-// RUN: FileCheck -input-file=%T/modernize-concat-nested-namespaces.h %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h -check-prefix=CHECK-FIXES
+// RUN: mkdir -p %t.dir
+// RUN: cp %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h %t.dir/modernize-concat-nested-namespaces.h
+// RUN: %check_clang_tidy --match-partial-fixes -std=c++17 -check-suffix=NORMAL %s modernize-concat-nested-namespaces %t.dir/code -- -header-filter=".*" -- -I %t.dir
+// RUN: FileCheck -input-file=%t.dir/modernize-concat-nested-namespaces.h %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h -check-prefix=CHECK-FIXES
// Restore header file and re-run with c++20:
-// RUN: cp %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h %T/modernize-concat-nested-namespaces.h
-// RUN: %check_clang_tidy --match-partial-fixes -std=c++20 -check-suffixes=NORMAL,CPP20 %s modernize-concat-nested-namespaces %t -- -header-filter=".*" -- -I %T
-// RUN: FileCheck -input-file=%T/modernize-concat-nested-namespaces.h %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h -check-prefix=CHECK-FIXES
+// RUN: cp %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h %t.dir/modernize-concat-nested-namespaces.h
+// RUN: %check_clang_tidy --match-partial-fixes -std=c++20 -check-suffixes=NORMAL,CPP20 %s modernize-concat-nested-namespaces %t.dir/code -- -header-filter=".*" -- -I %t.dir
+// RUN: FileCheck -input-file=%t.dir/modernize-concat-nested-namespaces.h %S/Inputs/concat-nested-namespaces/modernize-concat-nested-namespaces.h -check-prefix=CHECK-FIXES
#include "modernize-concat-nested-namespaces.h"
// CHECK-MESSAGES-NORMAL-DAG: modernize-concat-nested-namespaces.h:1:1: warning: nested namespaces can be concatenated [modernize-concat-nested-namespaces]
diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-header.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-header.cpp
index 5d2f1af..461a637 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-header.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-header.cpp
@@ -1,6 +1,7 @@
-// RUN: cp %S/Inputs/pass-by-value/header.h %T/pass-by-value-header.h
-// RUN: clang-tidy %s -checks='-*,modernize-pass-by-value' -header-filter='.*' -fix -- -std=c++11 -I %T | FileCheck %s -check-prefix=CHECK-MESSAGES -implicit-check-not="{{warning|error}}:"
-// RUN: FileCheck -input-file=%T/pass-by-value-header.h %s -check-prefix=CHECK-FIXES
+// RUN: mkdir -p %t.dir
+// RUN: cp %S/Inputs/pass-by-value/header.h %t.dir/pass-by-value-header.h
+// RUN: clang-tidy %s -checks='-*,modernize-pass-by-value' -header-filter='.*' -fix -- -std=c++11 -I %t.dir | FileCheck %s -check-prefix=CHECK-MESSAGES -implicit-check-not="{{warning|error}}:"
+// RUN: FileCheck -input-file=%t.dir/pass-by-value-header.h %s -check-prefix=CHECK-FIXES
// FIXME: Make the test work in all language modes.
#include "pass-by-value-header.h"
diff --git a/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-multi-fixes.cpp b/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-multi-fixes.cpp
index 238e445..b77c74b 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-multi-fixes.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/modernize/pass-by-value-multi-fixes.cpp
@@ -1,12 +1,13 @@
-// RUN: cat %S/Inputs/pass-by-value/header-with-fix.h > %T/pass-by-value-header-with-fix.h
-// RUN: sed -e 's#//.*$##' %s > %t.cpp
-// RUN: clang-tidy %t.cpp -checks='-*,modernize-pass-by-value' -header-filter='.*' -fix -- -std=c++11 -I %T | FileCheck %s -check-prefix=CHECK-MESSAGES -implicit-check-not="{{warning|error}}:"
-// RUN: FileCheck -input-file=%t.cpp %s -check-prefix=CHECK-FIXES
-// RUN: FileCheck -input-file=%T/pass-by-value-header-with-fix.h %s -check-prefix=CHECK-HEADER-FIXES
+// RUN: mkdir -p %t.dir
+// RUN: cat %S/Inputs/pass-by-value/header-with-fix.h > %t.dir/pass-by-value-header-with-fix.h
+// RUN: sed -e 's#//.*$##' %s > %t.dir/code.cpp
+// RUN: clang-tidy %t.dir/code.cpp -checks='-*,modernize-pass-by-value' -header-filter='.*' -fix -- -std=c++11 -I %t.dir | FileCheck %s -check-prefix=CHECK-MESSAGES -implicit-check-not="{{warning|error}}:"
+// RUN: FileCheck -input-file=%t.dir/code.cpp %s -check-prefix=CHECK-FIXES
+// RUN: FileCheck -input-file=%t.dir/pass-by-value-header-with-fix.h %s -check-prefix=CHECK-HEADER-FIXES
#include "pass-by-value-header-with-fix.h"
// CHECK-HEADER-FIXES: Foo(S s);
Foo::Foo(const S &s) : s(s) {}
-// CHECK-MESSAGES: :9:10: warning: pass by value and use std::move [modernize-pass-by-value]
+// CHECK-MESSAGES: :10:10: warning: pass by value and use std::move [modernize-pass-by-value]
// CHECK-FIXES: #include <utility>
// CHECK-FIXES: Foo::Foo(S s) : s(std::move(s)) {}
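The fix the CHECK-FIXES lines expect is the usual pass-by-value-then-move idiom enforced by ``modernize-pass-by-value``; a minimal standalone sketch:

.. code-block:: c++

  #include <string>
  #include <utility>

  struct Foo {
    std::string S;
    // Lvalue arguments pay one copy into the parameter, rvalues pay one
    // move; the member is then move-constructed instead of copied again.
    explicit Foo(std::string S) : S(std::move(S)) {}
  };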
diff --git a/clang-tools-extra/test/clang-tidy/checkers/portability/restrict-system-includes-transitive.cpp b/clang-tools-extra/test/clang-tidy/checkers/portability/restrict-system-includes-transitive.cpp
index 744f45f..ff7c66b 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/portability/restrict-system-includes-transitive.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/portability/restrict-system-includes-transitive.cpp
@@ -1,12 +1,12 @@
-// RUN: rm -rf %T/Headers
-// RUN: mkdir %T/Headers
-// RUN: cp -r %S/Inputs/restrict-system-includes %T/Headers/portability-restrict-system-includes
+// RUN: rm -rf %t.dir/Headers
+// RUN: mkdir -p %t.dir/Headers
+// RUN: cp -r %S/Inputs/restrict-system-includes %t.dir/Headers/portability-restrict-system-includes
// RUN: %check_clang_tidy -std=c++11 %s portability-restrict-system-includes %t \
// RUN: -- -config="{CheckOptions: {portability-restrict-system-includes.Includes: 'transitive.h,s.h'}}" \
// RUN: -system-headers -header-filter=.* \
-// RUN: -- -I %T/Headers/portability-restrict-system-includes -isystem %T/Headers/portability-restrict-system-includes/system
-// RUN: FileCheck -input-file=%T/Headers/portability-restrict-system-includes/transitive2.h %s -check-prefix=CHECK-FIXES
-// RUN: rm -rf %T/Headers
+// RUN: -- -I %t.dir/Headers/portability-restrict-system-includes -isystem %t.dir/Headers/portability-restrict-system-includes/system
+// RUN: FileCheck -input-file=%t.dir/Headers/portability-restrict-system-includes/transitive2.h %s -check-prefix=CHECK-FIXES
+// RUN: rm -rf %t.dir/Headers
// FIXME: Make the test work in all language modes.
// transitive.h includes <r.h> and <t.h>
diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-symlink.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-symlink.cpp
index 34dc340..7c99f70 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-symlink.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-symlink.cpp
@@ -2,11 +2,12 @@
// in the header file so it fails if run multiple times with different
// `-std` flags, as check_clang_tidy does by default.
//
-// RUN: rm -rf %T/symlink
-// RUN: cp -r %S/Inputs/identifier-naming/symlink %T/symlink
-// RUN: mkdir -p %T/symlink/build
-// RUN: ln -s %T/symlink/include/test.h %T/symlink/build/test.h
-// RUN: %check_clang_tidy -std=c++20 %s readability-identifier-naming %t -- --header-filter="test.h" --config-file=%S/Inputs/identifier-naming/symlink/include/.clang-tidy -- -I %T/symlink/build
+// RUN: rm -rf %t.dir
+// RUN: mkdir -p %t.dir
+// RUN: cp -r %S/Inputs/identifier-naming/symlink %t.dir/symlink
+// RUN: mkdir -p %t.dir/symlink/build
+// RUN: ln -s %t.dir/symlink/include/test.h %t.dir/symlink/build/test.h
+// RUN: %check_clang_tidy -std=c++20 %s readability-identifier-naming %t.dir -- --header-filter="test.h" --config-file=%S/Inputs/identifier-naming/symlink/include/.clang-tidy -- -I %t.dir/symlink/build
// UNSUPPORTED: system-windows
#include "test.h"
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-diff.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-diff.cpp
index 7aa6ce9..25be90f 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-diff.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-diff.cpp
@@ -3,9 +3,9 @@
// RUN: clang-tidy -checks=-*,modernize-use-override %t.cpp -- -std=c++11 | FileCheck -check-prefix=CHECK-SANITY %s
// RUN: not diff -U0 %s %t.cpp | %clang_tidy_diff -checks=-*,modernize-use-override -- -std=c++11 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-JMAX
// RUN: not diff -U0 %s %t.cpp | %clang_tidy_diff -checks=-*,modernize-use-override -quiet -- -std=c++11 2>&1 | FileCheck -check-prefix=CHECK-QUIET %s
-// RUN: mkdir -p %T/compilation-database-test/
-// RUN: echo '[{"directory": "%T", "command": "clang++ -o test.o -std=c++11 %t.cpp", "file": "%t.cpp"}]' > %T/compilation-database-test/compile_commands.json
-// RUN: not diff -U0 %s %t.cpp | %clang_tidy_diff -checks=-*,modernize-use-override -path %T/compilation-database-test 2>&1 | FileCheck -check-prefix=CHECK %s
+// RUN: mkdir -p %t.dir/compilation-database-test/
+// RUN: echo '[{"directory": "%t.dir", "command": "clang++ -o test.o -std=c++11 %t.cpp", "file": "%t.cpp"}]' > %t.dir/compilation-database-test/compile_commands.json
+// RUN: not diff -U0 %s %t.cpp | %clang_tidy_diff -checks=-*,modernize-use-override -path %t.dir/compilation-database-test 2>&1 | FileCheck -check-prefix=CHECK %s
// RUN: not diff -U0 %s %t.cpp | %clang_tidy_diff -checks=-*,modernize-use-override -j 1 -- -std=c++11 2>&1 | FileCheck %s --check-prefix=CHECK-J1
// CHECK-J1: Running clang-tidy in 1 threads...
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-run-with-database.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-run-with-database.cpp
index 3c4e849..9ca0ab3 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-run-with-database.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-run-with-database.cpp
@@ -1,28 +1,28 @@
-// RUN: mkdir -p %T/compilation-database-test/include
-// RUN: mkdir -p %T/compilation-database-test/a
-// RUN: mkdir -p %T/compilation-database-test/b
-// RUN: echo 'int *AA = 0;' > %T/compilation-database-test/a/a.cpp
-// RUN: echo 'int *AB = 0;' > %T/compilation-database-test/a/b.cpp
-// RUN: echo 'int *BB = 0;' > %T/compilation-database-test/b/b.cpp
-// RUN: echo 'int *BC = 0;' > %T/compilation-database-test/b/c.cpp
-// RUN: echo 'int *BD = 0;' > %T/compilation-database-test/b/d.cpp
-// RUN: echo 'int *HP = 0;' > %T/compilation-database-test/include/header.h
-// RUN: echo '#include "header.h"' > %T/compilation-database-test/include.cpp
-// RUN: sed 's|test_dir|%/T/compilation-database-test|g' %S/Inputs/compilation-database/template.json > %T/compile_commands.json
+// RUN: mkdir -p %t.dir/compilation-database-test/include
+// RUN: mkdir -p %t.dir/compilation-database-test/a
+// RUN: mkdir -p %t.dir/compilation-database-test/b
+// RUN: echo 'int *AA = 0;' > %t.dir/compilation-database-test/a/a.cpp
+// RUN: echo 'int *AB = 0;' > %t.dir/compilation-database-test/a/b.cpp
+// RUN: echo 'int *BB = 0;' > %t.dir/compilation-database-test/b/b.cpp
+// RUN: echo 'int *BC = 0;' > %t.dir/compilation-database-test/b/c.cpp
+// RUN: echo 'int *BD = 0;' > %t.dir/compilation-database-test/b/d.cpp
+// RUN: echo 'int *HP = 0;' > %t.dir/compilation-database-test/include/header.h
+// RUN: echo '#include "header.h"' > %t.dir/compilation-database-test/include.cpp
+// RUN: sed 's|test_dir|%/t.dir/compilation-database-test|g' %S/Inputs/compilation-database/template.json > %t.dir/compile_commands.json
// Regression test: shouldn't crash.
-// RUN: not clang-tidy --checks=-*,modernize-use-nullptr -p %T %T/compilation-database-test/b/not-exist -header-filter=.* 2>&1 | FileCheck %s -check-prefix=CHECK-NOT-EXIST
+// RUN: not clang-tidy --checks=-*,modernize-use-nullptr -p %t.dir %t.dir/compilation-database-test/b/not-exist -header-filter=.* 2>&1 | FileCheck %s -check-prefix=CHECK-NOT-EXIST
// CHECK-NOT-EXIST: Error while processing {{.*[/\\]}}not-exist.
// CHECK-NOT-EXIST: unable to handle compilation
// CHECK-NOT-EXIST: Found compiler error
-// RUN: clang-tidy --checks=-*,modernize-use-nullptr -p %T %T/compilation-database-test/a/a.cpp %T/compilation-database-test/a/b.cpp %T/compilation-database-test/b/b.cpp %T/compilation-database-test/b/c.cpp %T/compilation-database-test/b/d.cpp %T/compilation-database-test/include.cpp -header-filter=.* -fix
-// RUN: FileCheck -input-file=%T/compilation-database-test/a/a.cpp %s -check-prefix=CHECK-FIX1
-// RUN: FileCheck -input-file=%T/compilation-database-test/a/b.cpp %s -check-prefix=CHECK-FIX2
-// RUN: FileCheck -input-file=%T/compilation-database-test/b/b.cpp %s -check-prefix=CHECK-FIX3
-// RUN: FileCheck -input-file=%T/compilation-database-test/b/c.cpp %s -check-prefix=CHECK-FIX4
-// RUN: FileCheck -input-file=%T/compilation-database-test/b/d.cpp %s -check-prefix=CHECK-FIX5
-// RUN: FileCheck -input-file=%T/compilation-database-test/include/header.h %s -check-prefix=CHECK-FIX6
+// RUN: clang-tidy --checks=-*,modernize-use-nullptr -p %t.dir %t.dir/compilation-database-test/a/a.cpp %t.dir/compilation-database-test/a/b.cpp %t.dir/compilation-database-test/b/b.cpp %t.dir/compilation-database-test/b/c.cpp %t.dir/compilation-database-test/b/d.cpp %t.dir/compilation-database-test/include.cpp -header-filter=.* -fix
+// RUN: FileCheck -input-file=%t.dir/compilation-database-test/a/a.cpp %s -check-prefix=CHECK-FIX1
+// RUN: FileCheck -input-file=%t.dir/compilation-database-test/a/b.cpp %s -check-prefix=CHECK-FIX2
+// RUN: FileCheck -input-file=%t.dir/compilation-database-test/b/b.cpp %s -check-prefix=CHECK-FIX3
+// RUN: FileCheck -input-file=%t.dir/compilation-database-test/b/c.cpp %s -check-prefix=CHECK-FIX4
+// RUN: FileCheck -input-file=%t.dir/compilation-database-test/b/d.cpp %s -check-prefix=CHECK-FIX5
+// RUN: FileCheck -input-file=%t.dir/compilation-database-test/include/header.h %s -check-prefix=CHECK-FIX6
// CHECK-FIX1: int *AA = nullptr;
// CHECK-FIX2: int *AB = nullptr;
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-store-check-profile-one-tu.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-store-check-profile-one-tu.cpp
index f0939f7..192fbf5 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-store-check-profile-one-tu.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/clang-tidy-store-check-profile-one-tu.cpp
@@ -1,9 +1,9 @@
-// RUN: rm -rf %T/out
-// RUN: clang-tidy -enable-check-profile -checks='-*,readability-function-size' -store-check-profile=%T/out %s -- 2>&1 | not FileCheck --match-full-lines -implicit-check-not='{{warning:|error:}}' -check-prefix=CHECK-CONSOLE %s
-// RUN: cat %T/out/*-clang-tidy-store-check-profile-one-tu.cpp.json | FileCheck --match-full-lines -implicit-check-not='{{warning:|error:}}' -check-prefix=CHECK-FILE %s
-// RUN: rm -rf %T/out
-// RUN: clang-tidy -enable-check-profile -checks='-*,readability-function-size' -store-check-profile=%T/out %s -- 2>&1
-// RUN: cat %T/out/*-clang-tidy-store-check-profile-one-tu.cpp.json | FileCheck --match-full-lines -implicit-check-not='{{warning:|error:}}' -check-prefix=CHECK-FILE %s
+// RUN: rm -rf %t.dir/out
+// RUN: clang-tidy -enable-check-profile -checks='-*,readability-function-size' -store-check-profile=%t.dir/out %s -- 2>&1 | not FileCheck --match-full-lines -implicit-check-not='{{warning:|error:}}' -check-prefix=CHECK-CONSOLE %s
+// RUN: cat %t.dir/out/*-clang-tidy-store-check-profile-one-tu.cpp.json | FileCheck --match-full-lines -implicit-check-not='{{warning:|error:}}' -check-prefix=CHECK-FILE %s
+// RUN: rm -rf %t.dir/out
+// RUN: clang-tidy -enable-check-profile -checks='-*,readability-function-size' -store-check-profile=%t.dir/out %s -- 2>&1
+// RUN: cat %t.dir/out/*-clang-tidy-store-check-profile-one-tu.cpp.json | FileCheck --match-full-lines -implicit-check-not='{{warning:|error:}}' -check-prefix=CHECK-FILE %s
// CHECK-CONSOLE-NOT: ===-------------------------------------------------------------------------===
// CHECK-CONSOLE-NOT: {{.*}} --- Name ---
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/diagnostic.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/diagnostic.cpp
index 57d930b..610d1da 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/diagnostic.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/diagnostic.cpp
@@ -14,15 +14,15 @@
// Now create a directory with a compilation database file and ensure we don't
// use it after failing to parse commands from the command line:
//
-// RUN: mkdir -p %T/diagnostics/
-// RUN: echo '[{"directory": "%/T/diagnostics/","command": "clang++ -fan-option-from-compilation-database -c %/T/diagnostics/input.cpp", "file": "%/T/diagnostics/input.cpp"}]' > %T/diagnostics/compile_commands.json
-// RUN: cat %s > %T/diagnostics/input.cpp
-// RUN: not clang-tidy -checks='-*,modernize-use-override' %T/diagnostics/nonexistent.cpp -- 2>&1 | FileCheck -check-prefix=CHECK1 -implicit-check-not='{{warning:|error:}}' %s
-// RUN: not clang-tidy -checks='-*,clang-diagnostic-*,google-explicit-constructor' %T/diagnostics/input.cpp -- -fan-unknown-option 2>&1 | FileCheck -check-prefix=CHECK2 -implicit-check-not='{{warning:|error:}}' %s
-// RUN: not clang-tidy -checks='-*,google-explicit-constructor,clang-diagnostic-literal-conversion' %T/diagnostics/input.cpp -- -fan-unknown-option 2>&1 | FileCheck -check-prefix=CHECK3 -implicit-check-not='{{warning:|error:}}' %s
-// RUN: clang-tidy -checks='-*,modernize-use-override,clang-diagnostic-macro-redefined' %T/diagnostics/input.cpp -- -DMACRO_FROM_COMMAND_LINE 2>&1 | FileCheck -check-prefix=CHECK4 -implicit-check-not='{{warning:|error:}}' %s
-// RUN: not clang-tidy -checks='-*,clang-diagnostic-*,google-explicit-constructor' %T/diagnostics/input.cpp 2>&1 | FileCheck -check-prefix=CHECK5 -implicit-check-not='{{warning:|error:}}' %s
-// RUN: not clang-tidy -checks='-*,modernize-use-override' %T/diagnostics/input.cpp -- -DCOMPILATION_ERROR 2>&1 | FileCheck -check-prefix=CHECK6 -implicit-check-not='{{warning:|error:}}' %s
+// RUN: mkdir -p %t.dir/diagnostics/
+// RUN: echo '[{"directory": "%/t.dir/diagnostics/","command": "clang++ -fan-option-from-compilation-database -c %/T/diagnostics/input.cpp", "file": "%/T/diagnostics/input.cpp"}]' > %t.dir/diagnostics/compile_commands.json
+// RUN: cat %s > %t.dir/diagnostics/input.cpp
+// RUN: not clang-tidy -checks='-*,modernize-use-override' %t.dir/diagnostics/nonexistent.cpp -- 2>&1 | FileCheck -check-prefix=CHECK1 -implicit-check-not='{{warning:|error:}}' %s
+// RUN: not clang-tidy -checks='-*,clang-diagnostic-*,google-explicit-constructor' %t.dir/diagnostics/input.cpp -- -fan-unknown-option 2>&1 | FileCheck -check-prefix=CHECK2 -implicit-check-not='{{warning:|error:}}' %s
+// RUN: not clang-tidy -checks='-*,google-explicit-constructor,clang-diagnostic-literal-conversion' %t.dir/diagnostics/input.cpp -- -fan-unknown-option 2>&1 | FileCheck -check-prefix=CHECK3 -implicit-check-not='{{warning:|error:}}' %s
+// RUN: clang-tidy -checks='-*,modernize-use-override,clang-diagnostic-macro-redefined' %t.dir/diagnostics/input.cpp -- -DMACRO_FROM_COMMAND_LINE 2>&1 | FileCheck -check-prefix=CHECK4 -implicit-check-not='{{warning:|error:}}' %s
+// RUN: not clang-tidy -checks='-*,clang-diagnostic-*,google-explicit-constructor' %t.dir/diagnostics/input.cpp 2>&1 | FileCheck -check-prefix=CHECK5 -implicit-check-not='{{warning:|error:}}' %s
+// RUN: not clang-tidy -checks='-*,modernize-use-override' %t.dir/diagnostics/input.cpp -- -DCOMPILATION_ERROR 2>&1 | FileCheck -check-prefix=CHECK6 -implicit-check-not='{{warning:|error:}}' %s
// RUN: clang-tidy -checks='-*,modernize-use-override,clang-diagnostic-macro-redefined' %s -- -DMACRO_FROM_COMMAND_LINE -std=c++20 | FileCheck -check-prefix=CHECK4 -implicit-check-not='{{warning:|error:}}' %s
// RUN: clang-tidy -checks='-*,modernize-use-override,clang-diagnostic-macro-redefined,clang-diagnostic-literal-conversion' %s -- -DMACRO_FROM_COMMAND_LINE -std=c++20 -Wno-macro-redefined | FileCheck --check-prefix=CHECK7 -implicit-check-not='{{warning:|error:}}' %s
// RUN: clang-tidy -checks='-*,modernize-use-override' %s -- -std=c++20 -DPR64602
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/export-relpath.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/export-relpath.cpp
index 5bfd41f..5fd7303 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/export-relpath.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/export-relpath.cpp
@@ -1,14 +1,15 @@
-// RUN: rm -rf %T/clang-tidy/export-relpath
-// RUN: mkdir -p %T/clang-tidy/export-relpath/subdir
-// RUN: cp %s %T/clang-tidy/export-relpath/subdir/source.cpp
-// RUN: echo '[{ "directory": "%/T/clang-tidy/export-relpath/subdir", "command": "clang++ source.cpp", "file": "%/T/clang-tidy/export-relpath/subdir/source.cpp"}]' > %T/clang-tidy/export-relpath/subdir/compile_commands.json
+// RUN: mkdir -p %t.dir
+// RUN: rm -rf %t.dir/clang-tidy/export-relpath
+// RUN: mkdir -p %t.dir/clang-tidy/export-relpath/subdir
+// RUN: cp %s %t.dir/clang-tidy/export-relpath/subdir/source.cpp
+// RUN: echo '[{ "directory": "%/t.dir/clang-tidy/export-relpath/subdir", "command": "clang++ source.cpp", "file": "%/T/clang-tidy/export-relpath/subdir/source.cpp"}]' > %t.dir/clang-tidy/export-relpath/subdir/compile_commands.json
//
// Check that running clang-tidy in './subdir' and storing results
// in './fixes.yaml' works as expected.
//
-// RUN: cd %T/clang-tidy/export-relpath
+// RUN: cd %t.dir/clang-tidy/export-relpath
// RUN: clang-tidy -p subdir subdir/source.cpp -checks='-*,google-explicit-constructor,llvm-namespace-comment' -export-fixes=./fixes.yaml
-// RUN: FileCheck -input-file=%T/clang-tidy/export-relpath/fixes.yaml -check-prefix=CHECK-YAML %s
+// RUN: FileCheck -input-file=%t.dir/clang-tidy/export-relpath/fixes.yaml -check-prefix=CHECK-YAML %s
namespace i {
void f(); // So that the namespace isn't empty.
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/list-checks.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/list-checks.cpp
index 674c118..73bb5b7 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/list-checks.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/list-checks.cpp
@@ -1,4 +1,4 @@
-// RUN: mkdir -p %T/clang-tidy/list-checks/
-// RUN: echo '{Checks: "-*,google-*"}' > %T/clang-tidy/.clang-tidy
-// RUN: cd %T/clang-tidy/list-checks
+// RUN: mkdir -p %t.dir/clang-tidy/list-checks/
+// RUN: echo '{Checks: "-*,google-*"}' > %t.dir/clang-tidy/.clang-tidy
+// RUN: cd %t.dir/clang-tidy/list-checks
// RUN: clang-tidy -list-checks | grep "^ *google-"
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/read_file_config.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/read_file_config.cpp
index 3e39b4d..8b12f45 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/read_file_config.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/read_file_config.cpp
@@ -1,10 +1,10 @@
// REQUIRES: static-analyzer
-// RUN: mkdir -p %T/read-file-config/
-// RUN: cp %s %T/read-file-config/test.cpp
-// RUN: echo 'Checks: "-*,modernize-use-nullptr"' > %T/read-file-config/.clang-tidy
-// RUN: echo '[{"command": "cc -c -o test.o test.cpp", "directory": "%/T/read-file-config", "file": "%/T/read-file-config/test.cpp"}]' > %T/read-file-config/compile_commands.json
-// RUN: clang-tidy %T/read-file-config/test.cpp | not grep "warning: .*\[clang-analyzer-deadcode.DeadStores\]$"
-// RUN: clang-tidy -checks="-*,clang-analyzer-*" %T/read-file-config/test.cpp | grep "warning: .*\[clang-analyzer-deadcode.DeadStores\]$"
+// RUN: mkdir -p %t.dir/read-file-config/
+// RUN: cp %s %t.dir/read-file-config/test.cpp
+// RUN: echo 'Checks: "-*,modernize-use-nullptr"' > %t.dir/read-file-config/.clang-tidy
+// RUN: echo '[{"command": "cc -c -o test.o test.cpp", "directory": "%/t.dir/read-file-config", "file": "%/t.dir/read-file-config/test.cpp"}]' > %t.dir/read-file-config/compile_commands.json
+// RUN: clang-tidy %t.dir/read-file-config/test.cpp | not grep "warning: .*\[clang-analyzer-deadcode.DeadStores\]$"
+// RUN: clang-tidy -checks="-*,clang-analyzer-*" %t.dir/read-file-config/test.cpp | grep "warning: .*\[clang-analyzer-deadcode.DeadStores\]$"
void f() {
int x;
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp
index 93f6f9f..30d4933 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/verify-config.cpp
@@ -19,22 +19,22 @@
// CHECK-VERIFY: command-line option '-checks': warning: unknown check 'llvm-includeorder'; did you mean 'llvm-include-order' [-verify-config]
// CHECK-VERIFY: command-line option '-checks': warning: unknown check 'my-made-up-check' [-verify-config]
-// RUN: echo -e 'Checks: |\n bugprone-argument-comment\n bugprone-assert-side-effect,\n bugprone-bool-pointer-implicit-conversion\n readability-use-anyof*' > %T/MyClangTidyConfig
+// RUN: echo -e 'Checks: |\n bugprone-argument-comment\n bugprone-assert-side-effect,\n bugprone-bool-pointer-implicit-conversion\n readability-use-anyof*' > %t.MyClangTidyConfig
// RUN: clang-tidy -verify-config \
-// RUN: --config-file=%T/MyClangTidyConfig | FileCheck %s -check-prefix=CHECK-VERIFY-BLOCK-OK
+// RUN: --config-file=%t.MyClangTidyConfig | FileCheck %s -check-prefix=CHECK-VERIFY-BLOCK-OK
// CHECK-VERIFY-BLOCK-OK: No config errors detected.
-// RUN: echo -e 'Checks: |\n bugprone-arguments-*\n bugprone-assert-side-effects\n bugprone-bool-pointer-implicit-conversion' > %T/MyClangTidyConfigBad
+// RUN: echo -e 'Checks: |\n bugprone-arguments-*\n bugprone-assert-side-effects\n bugprone-bool-pointer-implicit-conversion' > %t.MyClangTidyConfigBad
// RUN: not clang-tidy -verify-config \
-// RUN: --config-file=%T/MyClangTidyConfigBad 2>&1 | FileCheck %s -check-prefix=CHECK-VERIFY-BLOCK-BAD
+// RUN: --config-file=%t.MyClangTidyConfigBad 2>&1 | FileCheck %s -check-prefix=CHECK-VERIFY-BLOCK-BAD
// CHECK-VERIFY-BLOCK-BAD: command-line option '-config': warning: check glob 'bugprone-arguments-*' doesn't match any known check [-verify-config]
// CHECK-VERIFY-BLOCK-BAD: command-line option '-config': warning: unknown check 'bugprone-assert-side-effects'; did you mean 'bugprone-assert-side-effect' [-verify-config]
-// RUN: echo -e 'Checks: "-*,clang-analyzer-optin.cplusplus.UninitializedObject"\nCheckOptions:\n clang-analyzer-optin.cplusplus.UninitializedObject:Pedantic: true' > %T/MyClangTidyConfigCSA
-// RUN: clang-tidy --verify-config --config-file=%T/MyClangTidyConfigCSA 2>&1 | FileCheck %s -check-prefix=CHECK-VERIFY-CSA-OK -implicit-check-not='{{warnings|error}}'
+// RUN: echo -e 'Checks: "-*,clang-analyzer-optin.cplusplus.UninitializedObject"\nCheckOptions:\n clang-analyzer-optin.cplusplus.UninitializedObject:Pedantic: true' > %t.MyClangTidyConfigCSA
+// RUN: clang-tidy --verify-config --config-file=%t.MyClangTidyConfigCSA 2>&1 | FileCheck %s -check-prefix=CHECK-VERIFY-CSA-OK -implicit-check-not='{{warnings|error}}'
// CHECK-VERIFY-CSA-OK: No config errors detected.
-// RUN: echo -e 'Checks: "-*,clang-analyzer-optin.cplusplus.UninitializedObject"\nCheckOptions:\n clang-analyzer-optin.cplusplus.UninitializedObject.Pedantic: true' > %T/MyClangTidyConfigCSABad
-// RUN: not clang-tidy --verify-config --config-file=%T/MyClangTidyConfigCSABad 2>&1 | FileCheck %s -check-prefix=CHECK-VERIFY-CSA-BAD -implicit-check-not='{{warnings|error}}'
+// RUN: echo -e 'Checks: "-*,clang-analyzer-optin.cplusplus.UninitializedObject"\nCheckOptions:\n clang-analyzer-optin.cplusplus.UninitializedObject.Pedantic: true' > %t.MyClangTidyConfigCSABad
+// RUN: not clang-tidy --verify-config --config-file=%t.MyClangTidyConfigCSABad 2>&1 | FileCheck %s -check-prefix=CHECK-VERIFY-CSA-BAD -implicit-check-not='{{warnings|error}}'
// CHECK-VERIFY-CSA-BAD: command-line option '-config': warning: unknown check option 'clang-analyzer-optin.cplusplus.UninitializedObject.Pedantic'; did you mean 'clang-analyzer-optin.cplusplus.UninitializedObject:Pedantic' [-verify-config]
diff --git a/clang-tools-extra/test/modularize/NoProblemsAssistant.modularize b/clang-tools-extra/test/modularize/NoProblemsAssistant.modularize
index 7ddc726..39c06dc 100644
--- a/clang-tools-extra/test/modularize/NoProblemsAssistant.modularize
+++ b/clang-tools-extra/test/modularize/NoProblemsAssistant.modularize
@@ -1,5 +1,7 @@
+# RUN: mkdir -p %t.dir/Output
+# RUN: cd %t.dir
# RUN: modularize -module-map-path=Output/NoProblemsAssistant.txt -root-module=Root -prefix=%S/Input %s
-# RUN: FileCheck --input-file=%T/NoProblemsAssistant.txt %s
+# RUN: FileCheck --input-file=Output/NoProblemsAssistant.txt %s
SomeTypes.h
SomeDecls.h
diff --git a/clang-tools-extra/unittests/clang-apply-replacements/ApplyReplacementsTest.cpp b/clang-tools-extra/unittests/clang-apply-replacements/ApplyReplacementsTest.cpp
index 87b0d69..0b92118 100644
--- a/clang-tools-extra/unittests/clang-apply-replacements/ApplyReplacementsTest.cpp
+++ b/clang-tools-extra/unittests/clang-apply-replacements/ApplyReplacementsTest.cpp
@@ -33,8 +33,7 @@ makeTUDiagnostics(const std::string &MainSourceFile, StringRef DiagnosticName,
// before applying.
TEST(ApplyReplacementsTest, mergeDiagnosticsWithNoFixes) {
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), DiagOpts);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts);
FileManager Files((FileSystemOptions()));
SourceManager SM(Diagnostics, Files);
TUReplacements TURs;
diff --git a/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp b/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp
index d3ca26a..410cebf 100644
--- a/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp
+++ b/clang-tools-extra/unittests/clang-tidy/ClangTidyOptionsTest.cpp
@@ -318,7 +318,8 @@ TEST(CheckOptionsValidation, MissingOptions) {
ClangTidyGlobalOptions(), Options));
ClangTidyDiagnosticConsumer DiagConsumer(Context);
auto DiagOpts = std::make_unique<DiagnosticOptions>();
- DiagnosticsEngine DE(new DiagnosticIDs(), *DiagOpts, &DiagConsumer, false);
+ DiagnosticsEngine DE(DiagnosticIDs::create(), *DiagOpts, &DiagConsumer,
+ false);
Context.setDiagnosticsEngine(std::move(DiagOpts), &DE);
TestCheck TestCheck(&Context);
EXPECT_FALSE(TestCheck.getLocal("Opt"));
@@ -348,7 +349,8 @@ TEST(CheckOptionsValidation, ValidIntOptions) {
ClangTidyGlobalOptions(), Options));
ClangTidyDiagnosticConsumer DiagConsumer(Context);
auto DiagOpts = std::make_unique<DiagnosticOptions>();
- DiagnosticsEngine DE(new DiagnosticIDs(), *DiagOpts, &DiagConsumer, false);
+ DiagnosticsEngine DE(DiagnosticIDs::create(), *DiagOpts, &DiagConsumer,
+ false);
Context.setDiagnosticsEngine(std::move(DiagOpts), &DE);
TestCheck TestCheck(&Context);
@@ -410,7 +412,8 @@ TEST(ValidConfiguration, ValidEnumOptions) {
ClangTidyGlobalOptions(), Options));
ClangTidyDiagnosticConsumer DiagConsumer(Context);
auto DiagOpts = std::make_unique<DiagnosticOptions>();
- DiagnosticsEngine DE(new DiagnosticIDs(), *DiagOpts, &DiagConsumer, false);
+ DiagnosticsEngine DE(DiagnosticIDs::create(), *DiagOpts, &DiagConsumer,
+ false);
Context.setDiagnosticsEngine(std::move(DiagOpts), &DE);
TestCheck TestCheck(&Context);
diff --git a/clang-tools-extra/unittests/clang-tidy/ClangTidyTest.h b/clang-tools-extra/unittests/clang-tidy/ClangTidyTest.h
index 789cc2a..89f0f9f 100644
--- a/clang-tools-extra/unittests/clang-tidy/ClangTidyTest.h
+++ b/clang-tools-extra/unittests/clang-tidy/ClangTidyTest.h
@@ -97,7 +97,8 @@ runCheckOnCode(StringRef Code, std::vector<ClangTidyError> *Errors = nullptr,
ClangTidyGlobalOptions(), Options));
ClangTidyDiagnosticConsumer DiagConsumer(Context);
auto DiagOpts = std::make_unique<DiagnosticOptions>();
- DiagnosticsEngine DE(new DiagnosticIDs(), *DiagOpts, &DiagConsumer, false);
+ DiagnosticsEngine DE(DiagnosticIDs::create(), *DiagOpts, &DiagConsumer,
+ false);
Context.setDiagnosticsEngine(std::move(DiagOpts), &DE);
std::vector<std::string> Args(1, "clang-tidy");
diff --git a/clang-tools-extra/unittests/include/common/VirtualFileHelper.h b/clang-tools-extra/unittests/include/common/VirtualFileHelper.h
index 86991bb3..cb075f8 100644
--- a/clang-tools-extra/unittests/include/common/VirtualFileHelper.h
+++ b/clang-tools-extra/unittests/include/common/VirtualFileHelper.h
@@ -32,8 +32,7 @@ class VirtualFileHelper {
public:
VirtualFileHelper()
- : Diagnostics(IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
- DiagOpts),
+ : Diagnostics(DiagnosticIDs::create(), DiagOpts),
DiagnosticPrinter(llvm::outs(), DiagOpts),
Files((FileSystemOptions())) {}
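These unittest hunks all apply one mechanical migration: instead of wrapping a raw ``new DiagnosticIDs()`` in an ``IntrusiveRefCntPtr`` by hand, the ``DiagnosticIDs::create()`` factory returns the ref-counted pointer directly. A condensed before/after sketch (assuming current Clang headers; names are illustrative):

.. code-block:: c++

  #include "clang/Basic/Diagnostic.h"
  #include "clang/Basic/DiagnosticIDs.h"
  #include "clang/Basic/DiagnosticOptions.h"

  void buildEngine() {
    clang::DiagnosticOptions DiagOpts;
    // Before:
    //   clang::DiagnosticsEngine DE(
    //       llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs>(
    //           new clang::DiagnosticIDs()),
    //       DiagOpts);
    // After: one call builds the same ref-counted table.
    clang::DiagnosticsEngine DE(clang::DiagnosticIDs::create(), DiagOpts);
  }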
diff --git a/clang/docs/InternalsManual.rst b/clang/docs/InternalsManual.rst
index 8a44db7..756db85 100644
--- a/clang/docs/InternalsManual.rst
+++ b/clang/docs/InternalsManual.rst
@@ -139,7 +139,7 @@ wording a diagnostic.
you mean %1?``.
* Appropriately capitalize proper nouns like ``Clang``, ``OpenCL``, ``GCC``,
- ``Objective-C``, etc and language standard versions like ``C11`` or ``C++11``.
+ ``Objective-C``, etc. and language standard versions like ``C11`` or ``C++11``.
* The wording should be succinct. If necessary, use a semicolon to combine
sentence fragments instead of using complete sentences. e.g., prefer wording
like ``'%0' is deprecated; it will be removed in a future release of Clang``
@@ -312,7 +312,7 @@ Description:
* number: A simple decimal number matches if the argument is the same as the
number. Example: ``"%plural{1:mouse|:mice}0"``
* range: A range in square brackets matches if the argument is within the
- range. Then range is inclusive on both ends. Example:
+ range. The range is inclusive on both ends. Example:
``"%plural{0:none|1:one|[2,5]:some|:many}0"``
* modulo: A modulo operator is followed by a number, an equals sign, and
either a number or a range. The tests are the same as for plain numbers
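To make the matching rules concrete, the selector ``%plural{0:none|1:one|[2,5]:some|:many}0`` from the example above resolves as in this plain-C++ re-implementation (a sketch of the documented rules, not Clang's actual formatter):

.. code-block:: c++

  #include <string>

  std::string pluralForm(int N) {
    if (N == 0) return "none";           // exact-number condition
    if (N == 1) return "one";            // exact-number condition
    if (N >= 2 && N <= 5) return "some"; // range [2,5], inclusive on both ends
    return "many";                       // empty condition matches the rest
  }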
@@ -341,7 +341,7 @@ Example:
Class:
Integers
Description:
- This is a formatter which represents the argument number in a human readable
+ This is a formatter which represents the argument number in a human-readable
format: the value ``123`` stays ``123``, ``12345`` becomes ``12.34k``,
``6666666`` becomes ``6.67M``, and so on for 'G' and 'T'.
@@ -561,7 +561,7 @@ documentation for the ``-verify`` mode can be found at
There are many other possible implementations of this interface, and this is
why we prefer diagnostics to pass down rich structured information in
-arguments. For example, an HTML output might want declaration names be
+arguments. For example, an HTML output might want declaration names to be
linkified to where they come from in the source. Another example is that a GUI
might let you click on typedefs to expand them. This application would want to
pass significantly more information about types through to the GUI than a
@@ -846,7 +846,7 @@ Option Marshalling Infrastructure
The option marshalling infrastructure automates the parsing of the Clang
``-cc1`` frontend command line arguments into ``CompilerInvocation`` and their
generation from ``CompilerInvocation``. The system replaces lots of repetitive
-C++ code with simple, declarative tablegen annotations and it's being used for
+C++ code with simple, declarative tablegen annotations and is being used for
the majority of the ``-cc1`` command line interface. This section provides an
overview of the system.
@@ -886,7 +886,7 @@ a string that the tablegen backend uses as a prefix to the
LANG_OPTION_WITH_MARSHALLING([...], LangOpts->IgnoreExceptions, [...])
#endif // LANG_OPTION_WITH_MARSHALLING
-Such definition can be used used in the function for parsing and generating
+Such a definition can be used in the function for parsing and generating
the command line:
.. code-block:: c++
@@ -986,7 +986,7 @@ line.
NegFlag<SetFalse, [], [], "Use the new pass manager in LLVM">,
BothFlags<[], [ClangOption, CC1Option]>>;
-With most such pair of flags, the ``-cc1`` frontend accepts only the flag that
+With most such pairs of flags, the ``-cc1`` frontend accepts only the flag that
changes the default key path value. The Clang driver is responsible for
accepting both and either forwarding the changing flag or discarding the flag
that would just set the key path to its default.
@@ -1042,8 +1042,8 @@ and the result is assigned to the key path on success.
The key path defaults to the value specified in ``MarshallingInfoEnum`` prefixed
by the contents of ``NormalizedValuesScope`` and ``::``. This ensures a correct
reference to an enum case is formed even if the enum resides in a different
-namespace or is an enum class. If the value present on command line does not
-match any of the comma-separated values from ``Values``, an error diagnostics is
+namespace or is an enum class. If the value present on the command line does not
+match any of the comma-separated values from ``Values``, an error diagnostic is
issued. Otherwise, the corresponding element from ``NormalizedValues`` at the
same index is assigned to the key path (also correctly scoped). The number of
comma-separated string values and elements of the array within
@@ -1111,7 +1111,7 @@ The Token class
---------------
The ``Token`` class is used to represent a single lexed token. Tokens are
-intended to be used by the lexer/preprocess and parser libraries, but are not
+intended to be used by the lexer/preprocessor and parser libraries, but are not
intended to live beyond them (for example, they should not live in the ASTs).
Tokens most often live on the stack (or some other location that is efficient
@@ -1253,7 +1253,7 @@ In order to do this, whenever the parser expects a ``tok::identifier`` or
``tok::coloncolon``, it should call the ``TryAnnotateTypeOrScopeToken`` or
``TryAnnotateCXXScopeToken`` methods to form the annotation token. These
methods will maximally form the specified annotation tokens and replace the
-current token with them, if applicable. If the current tokens is not valid for
+current token with them, if applicable. If the current token is not valid for
an annotation token, it will remain an identifier or "``::``" token.
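
As a rough sketch of that pattern (simplified; only the method and token names
are taken from ``clang::Parser``):

.. code-block:: c++

  // Inside a clang::Parser member function: if the current token could
  // begin a qualified name, try to fold it into one annotation token.
  if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) {
    if (TryAnnotateTypeOrScopeToken())
      return true; // Invalid; an error has already been diagnosed.
    // On success, Tok may now be tok::annot_typename or
    // tok::annot_cxxscope; otherwise it is still the original
    // identifier or "::" token.
  }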
.. _Lexer:
@@ -1276,7 +1276,7 @@ The lexer has a couple of interesting modal features:
This mode is used for lexing within an "``#if 0``" block, for example.
* The lexer can capture and return comments as tokens. This is required to
support the ``-C`` preprocessor mode, which passes comments through, and is
- used by the diagnostic checker to identifier expect-error annotations.
+ used by the diagnostic checker to identify expect-error annotations.
* The lexer can be in ``ParsingFilename`` mode, which happens when
preprocessing after reading a ``#include`` directive. This mode changes the
parsing of "``<``" to return an "angled string" instead of a bunch of tokens
@@ -1308,7 +1308,7 @@ The ``TokenLexer`` class
------------------------
The ``TokenLexer`` class is a token provider that returns tokens from a list of
-tokens that came from somewhere else. It typically used for two things: 1)
+tokens that came from somewhere else. It is typically used for two things: 1)
returning tokens from a macro definition as it is being expanded 2) returning
tokens from an arbitrary buffer of tokens. The latter use is used by
``_Pragma`` and will most likely be used to handle unbounded look-ahead for the
@@ -1509,7 +1509,7 @@ type checker must verify that the operand has a pointer type. It would not be
correct to check that with "``isa<PointerType>(SubExpr->getType())``", because
this predicate would fail if the subexpression had a typedef type.
-The solution to this problem are a set of helper methods on ``Type``, used to
+The solution to this problem is a set of helper methods on ``Type``, used to
check their properties. In this case, it would be correct to use
"``SubExpr->getType()->isPointerType()``" to do the check. This predicate will
return true if the *canonical type is a pointer*, which is true any time the
@@ -1632,7 +1632,7 @@ the names are inside the ``DeclarationName`` class).
``CXXLiteralOperatorName``
- The name is a C++11 user defined literal operator. User defined
+ The name is a C++11 user-defined literal operator. User-defined
literal operators are named according to the suffix they define,
e.g., "``_foo``" for "``operator "" _foo``". Use
``N.getCXXLiteralIdentifier()`` to retrieve the corresponding
@@ -1745,7 +1745,7 @@ will be found by the lookup, since it effectively replaces the first
declaration of "``f``".
(Note that because ``f`` can be redeclared at block scope, or in a friend
-declaration, etc. it is possible that the declaration of ``f`` found by name
+declaration, etc., it is possible that the declaration of ``f`` found by name
lookup will not be the most recent one.)
In the semantics-centric view, overloading of functions is represented
@@ -1945,7 +1945,7 @@ range of iterators over declarations of "``f``".
function ``DeclContext::getPrimaryContext`` retrieves the "primary" context for
a given ``DeclContext`` instance, which is the ``DeclContext`` responsible for
maintaining the lookup table used for the semantics-centric view. Given a
-DeclContext, one can obtain the set of declaration contexts that are
+``DeclContext``, one can obtain the set of declaration contexts that are
semantically connected to this declaration context, in source order, including
this context (which will be the only result, for non-namespace contexts) via
``DeclContext::collectAllContexts``. Note that these functions are used
@@ -1985,7 +1985,7 @@ broken code in the AST:
errors, the Decl node is marked as invalid.
- dropping invalid node: this often happens for errors that we don’t have
graceful recovery. Prior to Recovery AST, a mismatched-argument function call
- expression was dropped though a CallExpr was created for semantic analysis.
+ expression was dropped even though a ``CallExpr`` was created for semantic analysis.
With these strategies, clang surfaces better diagnostics, and provides AST
consumers a rich AST reflecting the written source code as much as possible even
@@ -2215,7 +2215,7 @@ Consequently, we must either set the virtual flag for the definition (but then
we create a malformed AST which the parser would never create), or we import
the whole redeclaration chain of the function. The most recent version of the
``ASTImporter`` uses the latter mechanism. We do import all function
-declarations - regardless if they are definitions or prototypes - in the order
+declarations - regardless of whether they are definitions or prototypes - in the order
as they appear in the "from" context.
.. One definition
@@ -2338,7 +2338,7 @@ library receive an Error object, which they must check.
During import of a specific declaration, it may happen that some AST nodes had
already been created before we recognize an error. In this case, we signal back
the error to the caller, but the "to" context remains polluted with those nodes
-which had been created. Ideally, those nodes should not had been created, but
+which had been created. Ideally, those nodes should not have been created, but
-that time we did not know about the error, the error happened later. Since the
-AST is immutable (most of the cases we can't remove existing nodes) we choose
+at that time we did not know about the error; it happened later. Since the
+AST is immutable (in most cases we can't remove existing nodes), we choose
to mark these nodes as erroneous.
@@ -2579,7 +2579,7 @@ that there are global declarations which collide with declarations from other
translation units, but they are not referenced outside their translation
unit. These declarations should be in an unnamed namespace ideally. If we treat
these collisions liberally then CTU analysis can find more results. Note, the
-feature be able to choose between name conflict handling strategies is still an
-ongoing work.
+feature to choose between name conflict handling strategies is still a work in
+progress.
.. _CFG:
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 9d9a000..20cadbf 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -134,6 +134,11 @@ Bug Fixes in This Version
-------------------------
- Fix a crash when the macro name is empty in ``#pragma push_macro("")`` or
``#pragma pop_macro("")``. (#GH149762).
+- ``-Wunreachable-code`` now diagnoses tautological or contradictory
+ comparisons such as ``x != 0 || x != 1.0`` and ``x == 0 && x == 1.0`` on
+ targets that treat ``_Float16``/``__fp16`` as native scalar types. Previously
+ the warning was silently lost because the operands differed only by an implicit
+ cast chain. (#GH149967).
Bug Fixes to Compiler Builtins
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -242,6 +247,8 @@ New features
Crash and bug fixes
^^^^^^^^^^^^^^^^^^^
+- Fixed a crash in the static analyzer that occurred when the expression in
+  an ``[[assume(expr)]]`` attribute was enclosed in parentheses. (#GH151529)
Improvements
^^^^^^^^^^^^
diff --git a/clang/docs/ShadowCallStack.rst b/clang/docs/ShadowCallStack.rst
index fc8bea8..9b104cc 100644
--- a/clang/docs/ShadowCallStack.rst
+++ b/clang/docs/ShadowCallStack.rst
@@ -61,7 +61,7 @@ The instrumentation makes use of the platform register ``x18`` on AArch64,
``x3`` (``gp``) on RISC-V with software shadow stack and ``ssp`` on RISC-V with
hardware shadow stack, which needs `Zicfiss`_ and ``-fcf-protection=return``.
Users can choose between the software and hardware based shadow stack
-implementation on RISC-V backend by passing ``-fsanitize=shadowcallstack``
+implementation on RISC-V backend by passing ``-fsanitize=shadow-call-stack``
or ``Zicfiss`` with ``-fcf-protection=return``.
For simplicity we will refer to this as the ``SCSReg``. On some platforms,
``SCSReg`` is reserved, and on others, it is designated as a scratch register.
diff --git a/clang/docs/ThinLTO.rst b/clang/docs/ThinLTO.rst
index 569405f..8cb3e0b 100644
--- a/clang/docs/ThinLTO.rst
+++ b/clang/docs/ThinLTO.rst
@@ -249,6 +249,9 @@ during the traditional link step.
The implementation is documented here: https://llvm.org/docs/DTLTO.html.
+Command-Line Options
+^^^^^^^^^^^^^^^^^^^^
+
DTLTO requires the LLD linker (``-fuse-ld=lld``).
``-fthinlto-distributor=<path>``
@@ -260,17 +263,29 @@ DTLTO requires the LLD linker (``-fuse-ld=lld``).
- Can be specified multiple times to pass multiple options.
- Multiple options can also be specified by separating them with commas.
-Examples:
- - ``clang -flto=thin -fthinlto-distributor=incredibuild.exe -Xthinlto-distributor=--verbose,--j10 -fuse-ld=lld``
- - ``clang -flto=thin -fthinlto-distributor=$(which python) -Xthinlto-distributor=incredibuild.py -fuse-ld=lld``
-
If ``-fthinlto-distributor=`` is specified, Clang supplies the path to a
compiler to be executed remotely to perform the ThinLTO backend
compilations. Currently, this is Clang itself.
+Usage
+^^^^^
+
+Compilation is unchanged from ThinLTO. DTLTO options need to be supplied for the link step:
+
+.. code-block:: console
+
+ % clang -flto=thin -fthinlto-distributor=distribute.sh -Xthinlto-distributor=--verbose,--j10 -fuse-ld=lld file1.o file2.o
+ % clang -flto=thin -fthinlto-distributor=$(which python) -Xthinlto-distributor=distribute.py -fuse-ld=lld file1.o file2.o
+
+When using lld-link:
+
+.. code-block:: console
+
+ % lld-link /out:a.exe file1.obj file2.obj /thinlto-distributor:distribute.exe /thinlto-remote-compiler:${LLVM}\bin\clang.exe /thinlto-distributor-arg:--verbose
+
Note that currently, DTLTO is only supported in some LLD flavors. Support can
be added to other LLD flavors in the future.
-See `DTLTO <https://lld.llvm.org/dtlto.html>`_ for more information.
+See `DTLTO <https://lld.llvm.org/DTLTO.html>`_ for more information.
More Information
================
diff --git a/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst b/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst
index 700dac0..a04b9f8 100644
--- a/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst
+++ b/clang/docs/analyzer/user-docs/CrossTranslationUnit.rst
@@ -132,7 +132,7 @@ Once we have set up the `PATH` environment variable and we activated the python
.. code-block:: bash
- $ CodeChecker analyze --ctu compile_commands.json -o reports
+ $ CodeChecker analyze --ctu --ctu-ast-mode load-from-pch compile_commands.json -o reports
$ ls -F
compile_commands.json foo.cpp foo.cpp.ast main.cpp reports/
$ tree reports
@@ -318,7 +318,7 @@ Once we have set up the `PATH` environment variable and we activated the python
.. code-block:: bash
- $ CodeChecker analyze --ctu --ctu-ast-loading-mode on-demand compile_commands.json -o reports
+ $ CodeChecker analyze --ctu compile_commands.json -o reports
$ ls -F
compile_commands.json foo.cpp main.cpp reports/
$ tree reports
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 224cb6a..b3ff45b 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -1642,7 +1642,7 @@ def DeviceKernel : DeclOrTypeAttr {
}
def SYCLKernelEntryPoint : InheritableAttr {
- let Spellings = [Clang<"sycl_kernel_entry_point">];
+ let Spellings = [CXX11<"clang", "sycl_kernel_entry_point">];
let Args = [
// KernelName is required and specifies the kernel name type.
TypeArgument<"KernelName">,
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index b8ece53..bb3953e 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -697,10 +697,27 @@ TARGET_BUILTIN(__builtin_amdgcn_exp2_bf16, "yy", "nc", "bf16-trans-insts")
TARGET_BUILTIN(__builtin_amdgcn_sin_bf16, "yy", "nc", "bf16-trans-insts")
TARGET_BUILTIN(__builtin_amdgcn_cos_bf16, "yy", "nc", "bf16-trans-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_pk_bf16_f32, "V2yffi", "nc", "bf16-cvt-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_pk_f16_f32, "V2hffi", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_f16_fp8, "hiIi", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_f16_bf8, "hiIi", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f16_fp8, "V2hs", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f16_bf8, "V2hs", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_fp8_f16, "sV2h", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_bf8_f16, "sV2h", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_fp8_f16, "ihiUiIi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_bf8_f16, "ihiUiIi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_f16_fp8, "V8hV2UiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_bf16_fp8, "V8yV2UiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_f16_bf8, "V8hV2UiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_bf16_bf8, "V8yV2UiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_f16_fp4, "V8hUiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_bf16_fp4, "V8yUiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_f32_fp8, "V8fV2UiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_f32_bf8, "V8fV2UiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_scale_pk8_f32_fp4, "V8fUiUiIUi", "nc", "gfx1250-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_fp8_f32_e5m3, "iffiIb", "nc", "fp8e5m3-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_fp8_f32_e5m3, "ifiiIi", "nc", "fp8e5m3-insts")
TARGET_BUILTIN(__builtin_amdgcn_sat_pk4_i4_i8, "UsUi", "nc", "gfx1250-insts")
TARGET_BUILTIN(__builtin_amdgcn_sat_pk4_u4_u8, "UsUi", "nc", "gfx1250-insts")
diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.td b/clang/include/clang/Basic/BuiltinsNVPTX.td
index 6e531ef..2d6fa17 100644
--- a/clang/include/clang/Basic/BuiltinsNVPTX.td
+++ b/clang/include/clang/Basic/BuiltinsNVPTX.td
@@ -21,13 +21,17 @@ class SM<string version, list<SMFeatures> newer_list> : SMFeatures {
!strconcat(f, "|", newer.Features));
}
+let Features = "sm_121a" in def SM_121a : SMFeatures;
let Features = "sm_120a" in def SM_120a : SMFeatures;
+let Features = "sm_103a" in def SM_103a : SMFeatures;
let Features = "sm_101a" in def SM_101a : SMFeatures;
let Features = "sm_100a" in def SM_100a : SMFeatures;
let Features = "sm_90a" in def SM_90a : SMFeatures;
-def SM_120 : SM<"120", [SM_120a]>;
-def SM_101 : SM<"101", [SM_101a, SM_120]>;
+def SM_121 : SM<"121", [SM_121a]>;
+def SM_120 : SM<"120", [SM_120a, SM_121]>;
+def SM_103 : SM<"103", [SM_103a, SM_120]>;
+def SM_101 : SM<"101", [SM_101a, SM_103]>;
def SM_100 : SM<"100", [SM_100a, SM_101]>;
def SM_90 : SM<"90", [SM_90a, SM_100]>;
def SM_89 : SM<"89", [SM_90]>;
@@ -50,8 +54,9 @@ class PTX<string version, PTXFeatures newer> : PTXFeatures {
let Features = !strconcat("ptx", version, "|", newer.Features);
}
-let Features = "ptx87" in def PTX87 : PTXFeatures;
+let Features = "ptx88" in def PTX88 : PTXFeatures;
+def PTX87 : PTX<"87", PTX88>;
def PTX86 : PTX<"86", PTX87>;
def PTX85 : PTX<"85", PTX86>;
def PTX84 : PTX<"84", PTX85>;
diff --git a/clang/include/clang/Basic/Cuda.h b/clang/include/clang/Basic/Cuda.h
index d6a22a7..81a792d 100644
--- a/clang/include/clang/Basic/Cuda.h
+++ b/clang/include/clang/Basic/Cuda.h
@@ -47,9 +47,10 @@ enum class CudaVersion {
CUDA_125,
CUDA_126,
CUDA_128,
- FULLY_SUPPORTED = CUDA_123,
+ CUDA_129,
+ FULLY_SUPPORTED = CUDA_128,
PARTIALLY_SUPPORTED =
- CUDA_128, // Partially supported. Proceed with a warning.
+ CUDA_129, // Partially supported. Proceed with a warning.
NEW = 10000, // Too new. Issue a warning, but allow using it.
};
const char *CudaVersionToString(CudaVersion V);
diff --git a/clang/include/clang/Basic/CustomizableOptional.h b/clang/include/clang/Basic/CustomizableOptional.h
index 2d6ae6a..8559eaa 100644
--- a/clang/include/clang/Basic/CustomizableOptional.h
+++ b/clang/include/clang/Basic/CustomizableOptional.h
@@ -70,15 +70,6 @@ public:
void reset() { Storage.reset(); }
- LLVM_DEPRECATED("Use &*X instead.", "&*X")
- constexpr const T *getPointer() const { return &Storage.value(); }
- LLVM_DEPRECATED("Use &*X instead.", "&*X")
- T *getPointer() { return &Storage.value(); }
- LLVM_DEPRECATED("std::optional::value is throwing. Use *X instead", "*X")
- constexpr const T &value() const & { return Storage.value(); }
- LLVM_DEPRECATED("std::optional::value is throwing. Use *X instead", "*X")
- T &value() & { return Storage.value(); }
-
constexpr explicit operator bool() const { return has_value(); }
constexpr bool has_value() const { return Storage.has_value(); }
constexpr const T *operator->() const { return &Storage.value(); }
@@ -90,8 +81,6 @@ public:
return has_value() ? operator*() : std::forward<U>(alt);
}
- LLVM_DEPRECATED("std::optional::value is throwing. Use *X instead", "*X")
- T &&value() && { return std::move(Storage.value()); }
T &&operator*() && { return std::move(Storage.value()); }
template <typename U> T value_or(U &&alt) && {
diff --git a/clang/include/clang/Basic/DiagnosticIDs.h b/clang/include/clang/Basic/DiagnosticIDs.h
index f07a003..b21a3b6 100644
--- a/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/clang/include/clang/Basic/DiagnosticIDs.h
@@ -272,6 +272,11 @@ public:
DiagnosticIDs();
~DiagnosticIDs();
+ // Convenience method to construct a new refcounted DiagnosticIDs.
+ static llvm::IntrusiveRefCntPtr<DiagnosticIDs> create() {
+ return llvm::makeIntrusiveRefCnt<DiagnosticIDs>();
+ }
+
/// Return an ID for a diagnostic with the specified format string and
/// level.
///
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 27d2152..94b174c 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -12936,31 +12936,29 @@ def err_sycl_special_type_num_init_method : Error<
// SYCL kernel entry point diagnostics
def err_sycl_entry_point_invalid : Error<
- "'sycl_kernel_entry_point' attribute cannot be applied to a"
+ "the %0 attribute cannot be applied to a"
" %select{non-static member function|variadic function|deleted function|"
"defaulted function|constexpr function|consteval function|"
"function declared with the 'noreturn' attribute|coroutine|"
- "function defined with a function try block}0">;
+ "function defined with a function try block}1">;
def err_sycl_entry_point_invalid_redeclaration : Error<
- "'sycl_kernel_entry_point' kernel name argument does not match prior"
- " declaration%diff{: $ vs $|}0,1">;
+ "the %0 kernel name argument does not match prior"
+ " declaration%diff{: $ vs $|}1,2">;
def err_sycl_kernel_name_conflict : Error<
- "'sycl_kernel_entry_point' kernel name argument conflicts with a previous"
- " declaration">;
+ "the %0 kernel name argument conflicts with a previous declaration">;
def warn_sycl_kernel_name_not_a_class_type : Warning<
"%0 is not a valid SYCL kernel name type; a non-union class type is required">,
InGroup<DiagGroup<"nonportable-sycl">>, DefaultError;
def warn_sycl_entry_point_redundant_declaration : Warning<
- "redundant 'sycl_kernel_entry_point' attribute">, InGroup<RedundantAttribute>;
+ "redundant %0 attribute">, InGroup<RedundantAttribute>;
def err_sycl_entry_point_after_definition : Error<
- "'sycl_kernel_entry_point' attribute cannot be added to a function after the"
- " function is defined">;
+ "the %0 attribute cannot be added to a function after the function is"
+ " defined">;
def err_sycl_entry_point_return_type : Error<
- "'sycl_kernel_entry_point' attribute only applies to functions with a"
- " 'void' return type">;
+ "the %0 attribute only applies to functions with a 'void' return type">;
def err_sycl_entry_point_deduced_return_type : Error<
- "'sycl_kernel_entry_point' attribute only applies to functions with a"
- " non-deduced 'void' return type">;
+ "the %0 attribute only applies to functions with a non-deduced 'void' return"
+ " type">;
def warn_cuda_maxclusterrank_sm_90 : Warning<
"maxclusterrank requires sm_90 or higher, CUDA arch provided: %0, ignoring "
diff --git a/clang/include/clang/Basic/OffloadArch.h b/clang/include/clang/Basic/OffloadArch.h
index 4dda3ec..387a684 100644
--- a/clang/include/clang/Basic/OffloadArch.h
+++ b/clang/include/clang/Basic/OffloadArch.h
@@ -45,8 +45,12 @@ enum class OffloadArch {
SM_100a,
SM_101,
SM_101a,
+ SM_103,
+ SM_103a,
SM_120,
SM_120a,
+ SM_121,
+ SM_121a,
GFX600,
GFX601,
GFX602,
diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index b6dd4ee..b26e558 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -447,6 +447,10 @@ public:
return create<cir::CmpOp>(loc, getBoolTy(), kind, lhs, rhs);
}
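+ // A NaN is the only value that compares unequal to itself, so the
+ // self-comparison (operand != operand) below is true exactly when
+ // the operand is NaN.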
+ mlir::Value createIsNaN(mlir::Location loc, mlir::Value operand) {
+ return createCompare(loc, cir::CmpOpKind::ne, operand, operand);
+ }
+
mlir::Value createShift(mlir::Location loc, mlir::Value lhs, mlir::Value rhs,
bool isShiftLeft) {
return create<cir::ShiftOp>(loc, lhs.getType(), lhs, rhs, isShiftLeft);
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 8e16bf8..5ef5b60 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -2823,6 +2823,53 @@ def CIR_ComplexSubOp : CIR_Op<"complex.sub", [
}];
}
+//===----------------------------------------------------------------------===//
+// ComplexMulOp
+//===----------------------------------------------------------------------===//
+
+def CIR_ComplexRangeKind : CIR_I32EnumAttr<
+ "ComplexRangeKind", "complex multiplication and division implementation", [
+ I32EnumAttrCase<"Full", 0, "full">,
+ I32EnumAttrCase<"Improved", 1, "improved">,
+ I32EnumAttrCase<"Promoted", 2, "promoted">,
+ I32EnumAttrCase<"Basic", 3, "basic">,
+]>;
+
+def CIR_ComplexMulOp : CIR_Op<"complex.mul", [
+ Pure, SameOperandsAndResultType
+]> {
+ let summary = "Complex multiplication";
+ let description = [{
+ The `cir.complex.mul` operation takes two complex numbers and returns
+ their product.
+
+ The range attribute selects the implementation used when the operation
+ is lowered to the LLVM dialect. For multiplication, 'improved',
+ 'promoted', and 'basic' are all handled equivalently, producing the
+ algebraic formula with no special handling for NaN values. If 'full' is
+ used, a runtime-library function is called if one of the intermediate
+ calculations produces a NaN value.
+
+ Example:
+
+ ```mlir
+ %2 = cir.complex.mul %0, %1 range(basic) : !cir.complex<!cir.float>
+ %2 = cir.complex.mul %0, %1 range(full) : !cir.complex<!cir.float>
+ ```
+ }];
+
+ let arguments = (ins
+ CIR_ComplexType:$lhs,
+ CIR_ComplexType:$rhs,
+ CIR_ComplexRangeKind:$range
+ );
+
+ let results = (outs CIR_ComplexType:$result);
+
+ let assemblyFormat = [{
+ $lhs `,` $rhs `range` `(` $range `)` `:` qualified(type($result)) attr-dict
+ }];
+}
//===----------------------------------------------------------------------===//
// Bit Manipulation Operations
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index ad3329d..adc7b5f 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -217,6 +217,7 @@ struct MissingFeatures {
static bool intrinsics() { return false; }
static bool isMemcpyEquivalentSpecialMember() { return false; }
static bool isTrivialCtorOrDtor() { return false; }
+ static bool lambdaCaptures() { return false; }
static bool lambdaFieldToName() { return false; }
static bool loopInfoStack() { return false; }
static bool lowerAggregateLoadStore() { return false; }
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index eb53821..3c04aeb 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -5312,6 +5312,8 @@ def mextended_const : Flag<["-"], "mextended-const">, Group<m_wasm_Features_Grou
def mno_extended_const : Flag<["-"], "mno-extended-const">, Group<m_wasm_Features_Group>;
def mfp16 : Flag<["-"], "mfp16">, Group<m_wasm_Features_Group>;
def mno_fp16 : Flag<["-"], "mno-fp16">, Group<m_wasm_Features_Group>;
+def mgc : Flag<["-"], "mgc">, Group<m_wasm_Features_Group>;
+def mno_gc : Flag<["-"], "mno-gc">, Group<m_wasm_Features_Group>;
def mmultimemory : Flag<["-"], "mmultimemory">, Group<m_wasm_Features_Group>;
def mno_multimemory : Flag<["-"], "mno-multimemory">, Group<m_wasm_Features_Group>;
def mmultivalue : Flag<["-"], "mmultivalue">, Group<m_wasm_Features_Group>;
diff --git a/clang/include/clang/Frontend/ASTUnit.h b/clang/include/clang/Frontend/ASTUnit.h
index 1286fe4..7dd9aef 100644
--- a/clang/include/clang/Frontend/ASTUnit.h
+++ b/clang/include/clang/Frontend/ASTUnit.h
@@ -445,6 +445,9 @@ public:
const DiagnosticsEngine &getDiagnostics() const { return *Diagnostics; }
DiagnosticsEngine &getDiagnostics() { return *Diagnostics; }
+ llvm::IntrusiveRefCntPtr<DiagnosticsEngine> getDiagnosticsPtr() {
+ return Diagnostics;
+ }
const SourceManager &getSourceManager() const { return *SourceMgr; }
SourceManager &getSourceManager() { return *SourceMgr; }
@@ -918,8 +921,9 @@ public:
bool IncludeCodePatterns, bool IncludeBriefComments,
CodeCompleteConsumer &Consumer,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- DiagnosticsEngine &Diag, LangOptions &LangOpts,
- SourceManager &SourceMgr, FileManager &FileMgr,
+ llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diag,
+ LangOptions &LangOpts, SourceManager &SourceMgr,
+ FileManager &FileMgr,
SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers,
std::unique_ptr<SyntaxOnlyAction> Act);
diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h
index 2408367..a24decd 100644
--- a/clang/include/clang/Frontend/CompilerInstance.h
+++ b/clang/include/clang/Frontend/CompilerInstance.h
@@ -361,7 +361,7 @@ public:
}
/// setDiagnostics - Replace the current diagnostics engine.
- void setDiagnostics(DiagnosticsEngine *Value);
+ void setDiagnostics(llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Value);
DiagnosticConsumer &getDiagnosticClient() const {
assert(Diagnostics && Diagnostics->getClient() &&
@@ -420,6 +420,8 @@ public:
/// @{
llvm::vfs::FileSystem &getVirtualFileSystem() const;
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
+ getVirtualFileSystemPtr() const;
/// @}
/// @name File Manager
diff --git a/clang/include/clang/Frontend/PrecompiledPreamble.h b/clang/include/clang/Frontend/PrecompiledPreamble.h
index 624df00..565395b 100644
--- a/clang/include/clang/Frontend/PrecompiledPreamble.h
+++ b/clang/include/clang/Frontend/PrecompiledPreamble.h
@@ -84,7 +84,7 @@ public:
static llvm::ErrorOr<PrecompiledPreamble>
Build(const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer, PreambleBounds Bounds,
- DiagnosticsEngine &Diagnostics,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
bool StoreInMemory, StringRef StoragePath,
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index d0ddb2e..8b9e5e0 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -106,25 +106,14 @@ bool InitLink::emit(Compiler<Emitter> *Ctx, const Expr *E) const {
return true;
}
-/// Scope managing label targets.
-template <class Emitter> class LabelScope {
-public:
- virtual ~LabelScope() {}
-
-protected:
- LabelScope(Compiler<Emitter> *Ctx) : Ctx(Ctx) {}
- /// Compiler instance.
- Compiler<Emitter> *Ctx;
-};
-
/// Sets the context for break/continue statements.
-template <class Emitter> class LoopScope final : public LabelScope<Emitter> {
+template <class Emitter> class LoopScope final {
public:
using LabelTy = typename Compiler<Emitter>::LabelTy;
using OptLabelTy = typename Compiler<Emitter>::OptLabelTy;
LoopScope(Compiler<Emitter> *Ctx, LabelTy BreakLabel, LabelTy ContinueLabel)
- : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
+ : Ctx(Ctx), OldBreakLabel(Ctx->BreakLabel),
OldContinueLabel(Ctx->ContinueLabel),
OldBreakVarScope(Ctx->BreakVarScope),
OldContinueVarScope(Ctx->ContinueVarScope) {
@@ -142,6 +131,7 @@ public:
}
private:
+ Compiler<Emitter> *Ctx;
OptLabelTy OldBreakLabel;
OptLabelTy OldContinueLabel;
VariableScope<Emitter> *OldBreakVarScope;
@@ -149,7 +139,7 @@ private:
};
// Sets the context for a switch scope, mapping labels.
-template <class Emitter> class SwitchScope final : public LabelScope<Emitter> {
+template <class Emitter> class SwitchScope final {
public:
using LabelTy = typename Compiler<Emitter>::LabelTy;
using OptLabelTy = typename Compiler<Emitter>::OptLabelTy;
@@ -157,7 +147,7 @@ public:
SwitchScope(Compiler<Emitter> *Ctx, CaseMap &&CaseLabels, LabelTy BreakLabel,
OptLabelTy DefaultLabel)
- : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
+ : Ctx(Ctx), OldBreakLabel(Ctx->BreakLabel),
OldDefaultLabel(this->Ctx->DefaultLabel),
OldCaseLabels(std::move(this->Ctx->CaseLabels)),
OldLabelVarScope(Ctx->BreakVarScope) {
@@ -175,6 +165,7 @@ public:
}
private:
+ Compiler<Emitter> *Ctx;
OptLabelTy OldBreakLabel;
OptLabelTy OldDefaultLabel;
CaseMap OldCaseLabels;
@@ -457,13 +448,17 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
assert(isPtrType(*FromT));
assert(isPtrType(*ToT));
if (FromT == ToT) {
- if (CE->getType()->isVoidPointerType())
+ if (CE->getType()->isVoidPointerType() &&
+ !SubExprTy->isFunctionPointerType()) {
return this->delegate(SubExpr);
+ }
if (!this->visit(SubExpr))
return false;
- if (CE->getType()->isFunctionPointerType())
- return true;
+ if (CE->getType()->isFunctionPointerType() ||
+ SubExprTy->isFunctionPointerType()) {
+ return this->emitFnPtrCast(CE);
+ }
if (FromT == PT_Ptr)
return this->emitPtrPtrCast(SubExprTy->isVoidPointerType(), CE);
return true;
@@ -1763,6 +1758,9 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
if (Inits.size() == 1 && E->getType() == Inits[0]->getType())
return this->delegate(Inits[0]);
+ if (!R)
+ return false;
+
auto initPrimitiveField = [=](const Record::Field *FieldToInit,
const Expr *Init, PrimType T,
bool Activate = false) -> bool {
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 1869500..9a325ab 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -2682,6 +2682,14 @@ static inline bool CastFixedPointIntegral(InterpState &S, CodePtr OpPC) {
return true;
}
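+// Casting to or from a function pointer is effectively a reinterpret_cast,
+// which is invalid in a constant expression; diagnose the cast, then let
+// evaluation continue with the pointer unchanged.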
+static inline bool FnPtrCast(InterpState &S, CodePtr OpPC) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
+ << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+ return true;
+}
+
static inline bool PtrPtrCast(InterpState &S, CodePtr OpPC, bool SrcIsVoidPtr) {
const auto &Ptr = S.Stk.peek<Pointer>();
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index abfed77..95a4433 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -412,7 +412,7 @@ def CheckDecl : Opcode {
def CheckEnumValue : Opcode {
let Args = [ArgEnumDecl];
- let Types = [FixedSizeIntegralTypeClass];
+ let Types = [IntegralTypeClass];
let HasGroup = 1;
}
@@ -735,6 +735,8 @@ def PtrPtrCast : Opcode {
}
+def FnPtrCast : Opcode;
+
def DecayPtr : Opcode {
let Types = [PtrTypeClass, PtrTypeClass];
let HasGroup = 1;
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index d85655b..cd9672d 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -4233,8 +4233,15 @@ bool Expr::isSameComparisonOperand(const Expr* E1, const Expr* E2) {
// template parameters.
const auto *DRE1 = cast<DeclRefExpr>(E1);
const auto *DRE2 = cast<DeclRefExpr>(E2);
- return DRE1->isPRValue() && DRE2->isPRValue() &&
- DRE1->getDecl() == DRE2->getDecl();
+
+ if (DRE1->getDecl() != DRE2->getDecl())
+ return false;
+
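+ // Require matching value categories: two prvalue uses or two lvalue uses
+ // of the same declaration denote the same operand for this comparison.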
+ if ((DRE1->isPRValue() && DRE2->isPRValue()) ||
+ (DRE1->isLValue() && DRE2->isLValue()))
+ return true;
+
+ return false;
}
case ImplicitCastExprClass: {
// Peel off implicit casts.
@@ -4244,7 +4251,8 @@ bool Expr::isSameComparisonOperand(const Expr* E1, const Expr* E2) {
if (!ICE1 || !ICE2)
return false;
if (ICE1->getCastKind() != ICE2->getCastKind())
- return false;
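+ // If the outermost cast kinds differ, the chains may still wrap the
+ // same operand (e.g. via an extra implicit conversion); compare the
+ // expressions with implicit casts stripped before giving up.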
+ return isSameComparisonOperand(ICE1->IgnoreParenImpCasts(),
+ ICE2->IgnoreParenImpCasts());
E1 = ICE1->getSubExpr()->IgnoreParens();
E2 = ICE2->getSubExpr()->IgnoreParens();
// The final cast must be one of these types.
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 9808298..993b64b 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -9741,10 +9741,19 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_AddressSpaceConversion:
if (!Visit(SubExpr))
return false;
- // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
- // permitted in constant expressions in C++11. Bitcasts from cv void* are
- // also static_casts, but we disallow them as a resolution to DR1312.
- if (!E->getType()->isVoidPointerType()) {
+ if (E->getType()->isFunctionPointerType() ||
+ SubExpr->getType()->isFunctionPointerType()) {
+ // Casting between two function pointer types, or between a function
+ // pointer and an object pointer, is always a reinterpret_cast.
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
+ << Info.Ctx.getLangOpts().CPlusPlus;
+ Result.Designator.setInvalid();
+ } else if (!E->getType()->isVoidPointerType()) {
+ // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
+ // permitted in constant expressions in C++11. Bitcasts from cv void* are
+ // also static_casts, but we disallow them as a resolution to DR1312.
+ //
// In some circumstances, we permit casting from void* to cv1 T*, when the
// actual pointee object is actually a cv2 T.
bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid &&
diff --git a/clang/lib/Analysis/RetainSummaryManager.cpp b/clang/lib/Analysis/RetainSummaryManager.cpp
index 987f894..688efe4 100644
--- a/clang/lib/Analysis/RetainSummaryManager.cpp
+++ b/clang/lib/Analysis/RetainSummaryManager.cpp
@@ -147,8 +147,7 @@ static bool isSubclass(const Decl *D,
static bool isExactClass(const Decl *D, StringRef ClassName) {
using namespace ast_matchers;
- DeclarationMatcher sameClassM =
- cxxRecordDecl(hasName(std::string(ClassName)));
+ DeclarationMatcher sameClassM = cxxRecordDecl(hasName(ClassName));
return !(match(sameClassM, *D, D->getASTContext()).empty());
}
diff --git a/clang/lib/Basic/Cuda.cpp b/clang/lib/Basic/Cuda.cpp
index 53b36d3..dc81b71 100644
--- a/clang/lib/Basic/Cuda.cpp
+++ b/clang/lib/Basic/Cuda.cpp
@@ -44,6 +44,7 @@ static const CudaVersionMapEntry CudaNameVersionMap[] = {
CUDA_ENTRY(12, 5),
CUDA_ENTRY(12, 6),
CUDA_ENTRY(12, 8),
+ CUDA_ENTRY(12, 9),
{"", CudaVersion::NEW, llvm::VersionTuple(std::numeric_limits<int>::max())},
{"unknown", CudaVersion::UNKNOWN, {}} // End of list tombstone.
};
@@ -119,6 +120,11 @@ CudaVersion MinVersionForOffloadArch(OffloadArch A) {
case OffloadArch::SM_120:
case OffloadArch::SM_120a:
return CudaVersion::CUDA_128;
+ case OffloadArch::SM_103:
+ case OffloadArch::SM_103a:
+ case OffloadArch::SM_121:
+ case OffloadArch::SM_121a:
+ return CudaVersion::CUDA_129;
default:
llvm_unreachable("invalid enum");
}
diff --git a/clang/lib/Basic/OffloadArch.cpp b/clang/lib/Basic/OffloadArch.cpp
index dce9ffa..4348178 100644
--- a/clang/lib/Basic/OffloadArch.cpp
+++ b/clang/lib/Basic/OffloadArch.cpp
@@ -33,8 +33,12 @@ static const OffloadArchToStringMap ArchNames[] = {
SM(100a), // Blackwell
SM(101), // Blackwell
SM(101a), // Blackwell
+ SM(103), // Blackwell
+ SM(103a), // Blackwell
SM(120), // Blackwell
SM(120a), // Blackwell
+ SM(121), // Blackwell
+ SM(121a), // Blackwell
GFX(600), // gfx600
GFX(601), // gfx601
GFX(602), // gfx602
diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp
index b2b1488..343c26e 100644
--- a/clang/lib/Basic/SourceManager.cpp
+++ b/clang/lib/Basic/SourceManager.cpp
@@ -2366,23 +2366,21 @@ size_t SourceManager::getDataStructureSizes() const {
SourceManagerForFile::SourceManagerForFile(StringRef FileName,
StringRef Content) {
- // This is referenced by `FileMgr` and will be released by `FileMgr` when it
- // is deleted.
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFileSystem->addFile(
FileName, 0,
llvm::MemoryBuffer::getMemBuffer(Content, FileName,
/*RequiresNullTerminator=*/false));
// This is passed to `SM` as reference, so the pointer has to be referenced
// in `Environment` so that `FileMgr` can out-live this function scope.
- FileMgr =
- std::make_unique<FileManager>(FileSystemOptions(), InMemoryFileSystem);
+ FileMgr = std::make_unique<FileManager>(FileSystemOptions(),
+ std::move(InMemoryFileSystem));
DiagOpts = std::make_unique<DiagnosticOptions>();
// This is passed to `SM` as reference, so the pointer has to be referenced
// by `Environment` due to the same reason above.
- Diagnostics = std::make_unique<DiagnosticsEngine>(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs), *DiagOpts);
+ Diagnostics =
+ std::make_unique<DiagnosticsEngine>(DiagnosticIDs::create(), *DiagOpts);
SourceMgr = std::make_unique<SourceManager>(*Diagnostics, *FileMgr);
FileEntryRef FE = llvm::cantFail(FileMgr->getFileRef(FileName));
FileID ID =
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index 54b39fd..79995cc 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -295,10 +295,16 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "1000";
case OffloadArch::SM_101:
case OffloadArch::SM_101a:
- return "1010";
+ return "1010";
+ case OffloadArch::SM_103:
+ case OffloadArch::SM_103a:
+ return "1030";
case OffloadArch::SM_120:
case OffloadArch::SM_120a:
- return "1200";
+ return "1200";
+ case OffloadArch::SM_121:
+ case OffloadArch::SM_121a:
+ return "1210";
}
llvm_unreachable("unhandled OffloadArch");
}();
@@ -307,7 +313,9 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case OffloadArch::SM_90a:
case OffloadArch::SM_100a:
case OffloadArch::SM_101a:
+ case OffloadArch::SM_103a:
case OffloadArch::SM_120a:
+ case OffloadArch::SM_121a:
Builder.defineMacro("__CUDA_ARCH_FEAT_SM" + CUDAArchCode.drop_back() + "_ALL", "1");
break;
default:
diff --git a/clang/lib/Basic/Targets/WebAssembly.cpp b/clang/lib/Basic/Targets/WebAssembly.cpp
index e362350e..55ffe1d 100644
--- a/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -59,12 +59,12 @@ bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
.Case("exception-handling", HasExceptionHandling)
.Case("extended-const", HasExtendedConst)
.Case("fp16", HasFP16)
+ .Case("gc", HasGC)
.Case("multimemory", HasMultiMemory)
.Case("multivalue", HasMultivalue)
.Case("mutable-globals", HasMutableGlobals)
.Case("nontrapping-fptoint", HasNontrappingFPToInt)
.Case("reference-types", HasReferenceTypes)
- .Case("gc", HasGC)
.Case("relaxed-simd", SIMDLevel >= RelaxedSIMD)
.Case("sign-ext", HasSignExt)
.Case("simd128", SIMDLevel >= SIMD128)
@@ -99,6 +99,8 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_multimemory__");
if (HasFP16)
Builder.defineMacro("__wasm_fp16__");
+ if (HasGC)
+ Builder.defineMacro("__wasm_gc__");
if (HasMultivalue)
Builder.defineMacro("__wasm_multivalue__");
if (HasMutableGlobals)
@@ -107,8 +109,6 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_nontrapping_fptoint__");
if (HasReferenceTypes)
Builder.defineMacro("__wasm_reference_types__");
- if (HasGC)
- Builder.defineMacro("__wasm_gc__");
if (SIMDLevel >= RelaxedSIMD)
Builder.defineMacro("__wasm_relaxed_simd__");
if (HasSignExt)
@@ -194,6 +194,7 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["exception-handling"] = true;
Features["extended-const"] = true;
Features["fp16"] = true;
+ Features["gc"] = true;
Features["multimemory"] = true;
Features["tail-call"] = true;
Features["wide-arithmetic"] = true;
@@ -270,6 +271,14 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasFP16 = false;
continue;
}
+ if (Feature == "+gc") {
+ HasGC = true;
+ continue;
+ }
+ if (Feature == "-gc") {
+ HasGC = false;
+ continue;
+ }
if (Feature == "+multimemory") {
HasMultiMemory = true;
continue;
@@ -310,14 +319,6 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasReferenceTypes = false;
continue;
}
- if (Feature == "+gc") {
- HasGC = true;
- continue;
- }
- if (Feature == "-gc") {
- HasGC = false;
- continue;
- }
if (Feature == "+relaxed-simd") {
SIMDLevel = std::max(SIMDLevel, RelaxedSIMD);
continue;
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index c47c8cc..eba7422 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -64,12 +64,12 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
bool HasExceptionHandling = false;
bool HasExtendedConst = false;
bool HasFP16 = false;
+ bool HasGC = false;
bool HasMultiMemory = false;
bool HasMultivalue = false;
bool HasMutableGlobals = false;
bool HasNontrappingFPToInt = false;
bool HasReferenceTypes = false;
- bool HasGC = false;
bool HasSignExt = false;
bool HasTailCall = false;
bool HasWideArithmetic = false;
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index a78956b..28576a1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -137,7 +137,7 @@ private:
/// A data-flow flag to make sure getRValue and/or copyInto are not
/// called twice for duplicated IR emission.
- mutable bool isUsed;
+ [[maybe_unused]] mutable bool isUsed;
public:
clang::QualType ty;
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 6527fb5..9e8eaa5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -520,7 +520,7 @@ void CIRGenFunction::emitExprAsInit(const Expr *init, const ValueDecl *d,
llvm_unreachable("bad evaluation kind");
}
-void CIRGenFunction::emitDecl(const Decl &d) {
+void CIRGenFunction::emitDecl(const Decl &d, bool evaluateConditionDecl) {
switch (d.getKind()) {
case Decl::BuiltinTemplate:
case Decl::TranslationUnit:
@@ -608,11 +608,14 @@ void CIRGenFunction::emitDecl(const Decl &d) {
case Decl::UsingDirective: // using namespace X; [C++]
assert(!cir::MissingFeatures::generateDebugInfo());
return;
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
const VarDecl &vd = cast<VarDecl>(d);
assert(vd.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
emitVarDecl(vd);
+ if (evaluateConditionDecl)
+ maybeEmitDeferredVarDeclInit(&vd);
return;
}
case Decl::OpenACCDeclare:
@@ -632,7 +635,6 @@ void CIRGenFunction::emitDecl(const Decl &d) {
case Decl::ImplicitConceptSpecialization:
case Decl::TopLevelStmt:
case Decl::UsingPack:
- case Decl::Decomposition: // This could be moved to join Decl::Var
case Decl::OMPDeclareReduction:
case Decl::OMPDeclareMapper:
cgm.errorNYI(d.getSourceRange(),
@@ -797,3 +799,11 @@ void CIRGenFunction::emitAutoVarTypeCleanup(
assert(!cir::MissingFeatures::ehCleanupFlags());
ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
}
+
+void CIRGenFunction::maybeEmitDeferredVarDeclInit(const VarDecl *vd) {
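+ // Tuple-like structured bindings give each binding a holding variable;
+ // emit those once the decomposition itself has been emitted.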
+ if (auto *dd = dyn_cast_if_present<DecompositionDecl>(vd)) {
+ for (auto *b : dd->flat_bindings())
+ if (auto *hd = b->getHoldingVar())
+ emitVarDecl(*hd);
+ }
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index c18498f..d267504 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -584,6 +584,15 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
return lv;
}
+ if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
+ if (e->refersToEnclosingVariableOrCapture()) {
+ assert(!cir::MissingFeatures::lambdaCaptures());
+ cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
+ return LValue();
+ }
+ return emitLValue(bd->getBinding());
+ }
+
cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
return LValue();
}
@@ -1472,9 +1481,10 @@ Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
if (e->getType()->isVariableArrayType())
return addr;
- auto pointeeTy = mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
+ [[maybe_unused]] auto pointeeTy =
+ mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
- mlir::Type arrayTy = convertType(e->getType());
+ [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
assert(pointeeTy == arrayTy);
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index a09d739..3aa170e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -91,6 +91,14 @@ public:
}
mlir::Value VisitUnaryDeref(const Expr *e);
+
+ mlir::Value VisitUnaryPlus(const UnaryOperator *e);
+
+ mlir::Value VisitPlusMinus(const UnaryOperator *e, cir::UnaryOpKind kind,
+ QualType promotionType);
+
+ mlir::Value VisitUnaryMinus(const UnaryOperator *e);
+
mlir::Value VisitUnaryNot(const UnaryOperator *e);
struct BinOpInfo {
@@ -110,6 +118,7 @@ public:
mlir::Value emitBinAdd(const BinOpInfo &op);
mlir::Value emitBinSub(const BinOpInfo &op);
+ mlir::Value emitBinMul(const BinOpInfo &op);
QualType getPromotionType(QualType ty, bool isDivOpCode = false) {
if (auto *complexTy = ty->getAs<ComplexType>()) {
@@ -142,16 +151,20 @@ public:
HANDLEBINOP(Add)
HANDLEBINOP(Sub)
+ HANDLEBINOP(Mul)
#undef HANDLEBINOP
};
} // namespace
+#ifndef NDEBUG
+// Only used in asserts
static const ComplexType *getComplexType(QualType type) {
type = type.getCanonicalType();
if (const ComplexType *comp = dyn_cast<ComplexType>(type))
return comp;
return cast<ComplexType>(cast<AtomicType>(type)->getValueType());
}
+#endif // NDEBUG
LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
mlir::Value &value) {
@@ -282,6 +295,41 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
llvm_unreachable("unknown cast resulting in complex value");
}
+mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
+ QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+ mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
+ if (!promotionTy.isNull()) {
+ cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
+ return {};
+ }
+ return result;
+}
+
+mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
+ cir::UnaryOpKind kind,
+ QualType promotionType) {
+ assert((kind == cir::UnaryOpKind::Plus ||
+         kind == cir::UnaryOpKind::Minus) &&
+        "Invalid UnaryOp kind for ComplexType Plus or Minus");
+
+ mlir::Value op;
+ if (!promotionType.isNull())
+ op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
+ else
+ op = Visit(e->getSubExpr());
+ return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()), kind, op);
+}
+
+mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
+ QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+ mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
+ if (!promotionTy.isNull()) {
+ cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
+ return {};
+ }
+ return result;
+}
+
mlir::Value ComplexExprEmitter::emitConstant(
const CIRGenFunction::ConstantEmission &constant, Expr *e) {
assert(constant && "not a constant");
@@ -534,13 +582,22 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
return emitBin##OP(emitBinOps(bo, promotionTy));
HANDLE_BINOP(Add)
HANDLE_BINOP(Sub)
+ HANDLE_BINOP(Mul)
#undef HANDLE_BINOP
default:
break;
}
- } else if (isa<UnaryOperator>(e)) {
- cgf.cgm.errorNYI("emitPromoted UnaryOperator");
- return {};
+ } else if (const auto *unaryOp = dyn_cast<UnaryOperator>(e)) {
+ switch (unaryOp->getOpcode()) {
+ case UO_Minus:
+ case UO_Plus: {
+ auto kind = unaryOp->getOpcode() == UO_Plus ? cir::UnaryOpKind::Plus
+ : cir::UnaryOpKind::Minus;
+ return VisitPlusMinus(unaryOp, kind, promotionTy);
+ }
+ default:
+ break;
+ }
}
mlir::Value result = Visit(const_cast<Expr *>(e));
@@ -585,6 +642,31 @@ mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
}
+static cir::ComplexRangeKind
+getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
+ switch (range) {
+ case LangOptions::CX_Full:
+ return cir::ComplexRangeKind::Full;
+ case LangOptions::CX_Improved:
+ return cir::ComplexRangeKind::Improved;
+ case LangOptions::CX_Promoted:
+ return cir::ComplexRangeKind::Promoted;
+ case LangOptions::CX_Basic:
+ return cir::ComplexRangeKind::Basic;
+ case LangOptions::CX_None:
+ // The default value for ComplexRangeKind is Full if no option is selected.
+ return cir::ComplexRangeKind::Full;
+ }
+}
+
+mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
+ assert(!cir::MissingFeatures::fastMathFlags());
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ cir::ComplexRangeKind rangeKind =
+ getComplexRangeAttr(op.fpFeatures.getComplexRange());
+ return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+}
+
LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
assert(e->getOpcode() == BO_Assign && "Expected assign op");
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 2523b0f..f62be49 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -439,7 +439,7 @@ public:
value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
} else if (type->isIntegerType()) {
QualType promotedType;
- bool canPerformLossyDemotionCheck = false;
+ [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
if (cgf.getContext().isPromotableIntegerType(type)) {
promotedType = cgf.getContext().getPromotedIntegerType(type);
assert(promotedType != type && "Shouldn't promote to the same type.");
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index c65d025..0c9bc38 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -216,8 +216,7 @@ void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
mlir::Location loc, CharUnits alignment,
bool isParam) {
- const auto *namedVar = dyn_cast_or_null<NamedDecl>(var);
- assert(namedVar && "Needs a named decl");
+ assert(isa<NamedDecl>(var) && "Needs a named decl");
assert(!cir::MissingFeatures::cgfSymbolTable());
auto allocaOp = cast<cir::AllocaOp>(addrVal.getDefiningOp());
@@ -943,6 +942,7 @@ void CIRGenFunction::emitVariablyModifiedType(QualType type) {
case Type::HLSLInlineSpirv:
case Type::PredefinedSugar:
cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
+ break;
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 603f750..f9c8636 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -870,6 +870,8 @@ public:
void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
clang::QualType::DestructionKind dtorKind);
+ void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
+
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
CXXCtorInitializer *baseInit);
@@ -1059,7 +1061,7 @@ public:
void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);
- void emitDecl(const clang::Decl &d);
+ void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 623b84f..b143682 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -656,8 +656,6 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty,
void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
bool isTentative) {
- const QualType astTy = vd->getType();
-
if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) {
errorNYI(vd->getSourceRange(), "emit OpenCL/OpenMP global variable");
return;
@@ -701,7 +699,7 @@ void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
// never attempt to emit a tentative definition if a real one
// exists. A use may still exists, however, so we still may need
// to do a RAUW.
- assert(!astTy->isIncompleteType() && "Unexpected incomplete type");
+ assert(!vd->getType()->isIncompleteType() && "Unexpected incomplete type");
init = builder.getZeroInitAttr(convertType(vd->getType()));
} else {
emitter.emplace(*this);
@@ -1308,8 +1306,13 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
break;
}
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
auto *vd = cast<VarDecl>(decl);
+ if (isa<DecompositionDecl>(decl)) {
+ errorNYI(decl->getSourceRange(), "global variable decompositions");
+ break;
+ }
emitGlobal(vd);
break;
}
@@ -1331,8 +1334,14 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
break;
// No code generation needed.
- case Decl::UsingShadow:
+ case Decl::ClassTemplate:
+ case Decl::Concept:
+ case Decl::CXXDeductionGuide:
case Decl::Empty:
+ case Decl::FunctionTemplate:
+ case Decl::StaticAssert:
+ case Decl::TypeAliasTemplate:
+ case Decl::UsingShadow:
break;
case Decl::CXXConstructor:
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index e4ec380..0c8ff4bd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -91,6 +91,9 @@ struct CIRRecordLowering final {
return astContext.getTargetInfo().getABI().starts_with("aapcs");
}
+ /// Helper function to check if the target machine is BigEndian.
+ bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
+
CharUnits bitsToCharUnits(uint64_t bitOffset) {
return astContext.toCharUnitsFromBits(bitOffset);
}
@@ -771,7 +774,104 @@ void CIRRecordLowering::computeVolatileBitfields() {
!cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
return;
- assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+ for (auto &[field, info] : bitFields) {
+ mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());
+
+ if (astContext.toBits(astRecordLayout.getAlignment()) <
+ getSizeInBits(resLTy).getQuantity())
+ continue;
+
+ // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
+ // for big-endian targets, but it assumes a container of width
+ // info.storageSize. Since AAPCS uses a different container size (width
+ // of the type), we first undo that calculation here and redo it once
+ // the bit-field offset within the new container is calculated.
+ const unsigned oldOffset =
+ isBigEndian() ? info.storageSize - (info.offset + info.size)
+ : info.offset;
+ // Offset to the bit-field from the beginning of the struct.
+ const unsigned absoluteOffset =
+ astContext.toBits(info.storageOffset) + oldOffset;
+
+ // Container size is the width of the bit-field type.
+ const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
+ // Nothing to do if the access uses the desired
+ // container width and is naturally aligned.
+ if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
+ continue;
+
+ // Offset within the container.
+ unsigned offset = absoluteOffset & (storageSize - 1);
+ // Bail out if an aligned load of the container cannot cover the entire
+ // bit-field. This can happen, for example, if the bit-field is part of a
+ // packed struct. The AAPCS does not define access rules for such cases,
+ // so we let clang follow its own rules.
+ if (offset + info.size > storageSize)
+ continue;
+
+ // Re-adjust offsets for big-endian targets.
+ if (isBigEndian())
+ offset = storageSize - (offset + info.size);
+
+ const CharUnits storageOffset =
+ astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
+ const CharUnits end = storageOffset +
+ astContext.toCharUnitsFromBits(storageSize) -
+ CharUnits::One();
+
+ const ASTRecordLayout &layout =
+ astContext.getASTRecordLayout(field->getParent());
+ // If the access reaches memory outside the record, then bail out.
+ const CharUnits recordSize = layout.getSize();
+ if (end >= recordSize)
+ continue;
+
+ // Bail out if performing this load would access non-bit-field members.
+ bool conflict = false;
+ for (const auto *f : recordDecl->fields()) {
+ // Allow overlap with sized bit-fields.
+ if (f->isBitField() && !f->isZeroLengthBitField())
+ continue;
+
+ const CharUnits fOffset = astContext.toCharUnitsFromBits(
+ layout.getFieldOffset(f->getFieldIndex()));
+
+ // As C11 defines it, a zero-sized bit-field acts as a barrier, so the
+ // fields before and after it should be free of race conditions.
+ // The AAPCS acknowledges this and imposes no restrictions when the
+ // natural container overlaps a zero-length bit-field.
+ if (f->isZeroLengthBitField()) {
+ if (end > fOffset && storageOffset < fOffset) {
+ conflict = true;
+ break;
+ }
+ }
+
+ const CharUnits fEnd =
+ fOffset +
+ astContext.toCharUnitsFromBits(astContext.toBits(
+ getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
+ CharUnits::One();
+ // If no overlap, continue.
+ if (end < fOffset || fEnd < storageOffset)
+ continue;
+
+ // The desired load overlaps a non-bit-field member, bail out.
+ conflict = true;
+ break;
+ }
+
+ if (conflict)
+ continue;
+ // Write the new bit-field access parameters.
+ // As the storage offset is now defined as the number of elements from the
+ // start of the structure, we divide the offset by the element size.
+ info.volatileStorageOffset =
+ storageOffset /
+ astContext.toCharUnitsFromBits(storageSize).getQuantity();
+ info.volatileStorageSize = storageSize;
+ info.volatileOffset = offset;
+ }
}
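The offset arithmetic above is compact; here is a self-contained sketch of the same container recomputation, using a hypothetical BitFieldInfoSketch struct (names are illustrative, not the CIR ones):

#include <cstdint>

struct BitFieldInfoSketch {
  unsigned offset;            // bit offset within the current storage unit
  unsigned size;              // bit-field width in bits
  unsigned storageSize;       // current storage unit width in bits
  uint64_t storageOffsetBits; // storage unit offset from the struct start
};

// Recompute the access against a container of containerBits (the width of
// the declared type), mirroring the loop above; returns false where the
// loop above would `continue`.
bool recomputeForAAPCS(BitFieldInfoSketch &info, unsigned containerBits,
                       bool bigEndian) {
  // Undo the big-endian pre-adjustment that assumed info.storageSize.
  unsigned oldOffset = bigEndian
                           ? info.storageSize - (info.offset + info.size)
                           : info.offset;
  uint64_t absolute = info.storageOffsetBits + oldOffset;
  unsigned offset = absolute & (containerBits - 1);
  // An aligned container load must cover the whole bit-field.
  if (offset + info.size > containerBits)
    return false;
  if (bigEndian) // redo the pre-adjustment for the new container
    offset = containerBits - (offset + info.size);
  info.offset = offset;
  info.storageSize = containerBits;
  info.storageOffsetBits = absolute & ~uint64_t(containerBits - 1);
  return true;
}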
void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 21bee33..50642e7 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -79,14 +79,15 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
#define EXPR(Type, Base) case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
{
- // Remember the block we came in on.
- mlir::Block *incoming = builder.getInsertionBlock();
- assert(incoming && "expression emission must have an insertion point");
+ assert(builder.getInsertionBlock() &&
+ "expression emission must have an insertion point");
emitIgnoredExpr(cast<Expr>(s));
- mlir::Block *outgoing = builder.getInsertionBlock();
- assert(outgoing && "expression emission cleared block!");
+ // Classic codegen checks here whether the emitter created a new block
+ // that is never used (by comparing the incoming and outgoing insertion
+ // points) and deletes the unused outgoing block. In CIR, we handle that
+ // during the cir.canonicalize pass.
return mlir::success();
}
case Stmt::IfStmtClass:
@@ -363,8 +364,8 @@ mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
assert(builder.getInsertionBlock() && "expected valid insertion point");
- for (const Decl *I : s.decls())
- emitDecl(*I);
+ for (const Decl *i : s.decls())
+ emitDecl(*i, /*evaluateConditionDecl=*/true);
return mlir::success();
}
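For reference, the kind of declaration the new evaluateConditionDecl flag concerns — a variable declared in a statement condition:

void useConditionDecl(int n) {
  // `v` is a condition variable declaration: its DeclStmt is emitted and
  // then its value is evaluated as the switch condition.
  switch (int v = n % 4) {
  case 0:
    break;
  default:
    break;
  }
}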
@@ -875,7 +876,7 @@ mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const clang::SwitchStmt &s) {
return mlir::failure();
if (s.getConditionVariable())
- emitDecl(*s.getConditionVariable());
+ emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
mlir::Value condV = emitScalarExpr(s.getCond());
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index ce3b30d..66260eb 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -15,7 +15,6 @@
#include "clang/CIR/Dialect/Passes.h"
#include "clang/CIR/MissingFeatures.h"
-#include <iostream>
#include <memory>
using namespace mlir;
@@ -28,21 +27,47 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
void runOnOp(mlir::Operation *op);
void lowerCastOp(cir::CastOp op);
+ void lowerComplexMulOp(cir::ComplexMulOp op);
void lowerUnaryOp(cir::UnaryOp op);
void lowerArrayDtor(cir::ArrayDtor op);
void lowerArrayCtor(cir::ArrayCtor op);
+ cir::FuncOp buildRuntimeFunction(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ cir::FuncType type,
+ cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage);
+
///
/// AST related
/// -----------
clang::ASTContext *astCtx;
+ /// Tracks the current module.
+ mlir::ModuleOp mlirModule;
+
void setASTContext(clang::ASTContext *c) { astCtx = c; }
};
} // namespace
+cir::FuncOp LoweringPreparePass::buildRuntimeFunction(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ cir::FuncType type, cir::GlobalLinkageKind linkage) {
+ cir::FuncOp f = dyn_cast_or_null<FuncOp>(SymbolTable::lookupNearestSymbolFrom(
+ mlirModule, StringAttr::get(mlirModule->getContext(), name)));
+ if (!f) {
+ f = builder.create<cir::FuncOp>(loc, name, type);
+ f.setLinkageAttr(
+ cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+ mlir::SymbolTable::setSymbolVisibility(
+ f, mlir::SymbolTable::Visibility::Private);
+
+ assert(!cir::MissingFeatures::opFuncExtraAttrs());
+ }
+ return f;
+}
+
static mlir::Value lowerScalarToComplexCast(mlir::MLIRContext &ctx,
cir::CastOp op) {
cir::CIRBaseBuilderTy builder(ctx);
@@ -128,6 +153,124 @@ void LoweringPreparePass::lowerCastOp(cir::CastOp op) {
}
}
+static mlir::Value buildComplexBinOpLibCall(
+ LoweringPreparePass &pass, CIRBaseBuilderTy &builder,
+ llvm::StringRef (*libFuncNameGetter)(llvm::APFloat::Semantics),
+ mlir::Location loc, cir::ComplexType ty, mlir::Value lhsReal,
+ mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag) {
+ cir::FPTypeInterface elementTy =
+ mlir::cast<cir::FPTypeInterface>(ty.getElementType());
+
+ llvm::StringRef libFuncName = libFuncNameGetter(
+ llvm::APFloat::SemanticsToEnum(elementTy.getFloatSemantics()));
+ llvm::SmallVector<mlir::Type, 4> libFuncInputTypes(4, elementTy);
+
+ cir::FuncType libFuncTy = cir::FuncType::get(libFuncInputTypes, ty);
+
+ // Insert a declaration for the runtime function to be used in complex
+ // multiplication and division when needed.
+ cir::FuncOp libFunc;
+ {
+ mlir::OpBuilder::InsertionGuard ipGuard{builder};
+ builder.setInsertionPointToStart(pass.mlirModule.getBody());
+ libFunc = pass.buildRuntimeFunction(builder, libFuncName, loc, libFuncTy);
+ }
+
+ cir::CallOp call =
+ builder.createCallOp(loc, libFunc, {lhsReal, lhsImag, rhsReal, rhsImag});
+ return call.getResult();
+}
+
+static llvm::StringRef
+getComplexMulLibCallName(llvm::APFloat::Semantics semantics) {
+ switch (semantics) {
+ case llvm::APFloat::S_IEEEhalf:
+ return "__mulhc3";
+ case llvm::APFloat::S_IEEEsingle:
+ return "__mulsc3";
+ case llvm::APFloat::S_IEEEdouble:
+ return "__muldc3";
+ case llvm::APFloat::S_PPCDoubleDouble:
+ return "__multc3";
+ case llvm::APFloat::S_x87DoubleExtended:
+ return "__mulxc3";
+ case llvm::APFloat::S_IEEEquad:
+ return "__multc3";
+ default:
+ llvm_unreachable("unsupported floating point type");
+ }
+}
+
+static mlir::Value lowerComplexMul(LoweringPreparePass &pass,
+ CIRBaseBuilderTy &builder,
+ mlir::Location loc, cir::ComplexMulOp op,
+ mlir::Value lhsReal, mlir::Value lhsImag,
+ mlir::Value rhsReal, mlir::Value rhsImag) {
+ // (a+bi) * (c+di) = (ac-bd) + (ad+bc)i
+ mlir::Value resultRealLhs =
+ builder.createBinop(loc, lhsReal, cir::BinOpKind::Mul, rhsReal);
+ mlir::Value resultRealRhs =
+ builder.createBinop(loc, lhsImag, cir::BinOpKind::Mul, rhsImag);
+ mlir::Value resultImagLhs =
+ builder.createBinop(loc, lhsReal, cir::BinOpKind::Mul, rhsImag);
+ mlir::Value resultImagRhs =
+ builder.createBinop(loc, lhsImag, cir::BinOpKind::Mul, rhsReal);
+ mlir::Value resultReal = builder.createBinop(
+ loc, resultRealLhs, cir::BinOpKind::Sub, resultRealRhs);
+ mlir::Value resultImag = builder.createBinop(
+ loc, resultImagLhs, cir::BinOpKind::Add, resultImagRhs);
+ mlir::Value algebraicResult =
+ builder.createComplexCreate(loc, resultReal, resultImag);
+
+ cir::ComplexType complexTy = op.getType();
+ cir::ComplexRangeKind rangeKind = op.getRange();
+ if (mlir::isa<cir::IntType>(complexTy.getElementType()) ||
+ rangeKind == cir::ComplexRangeKind::Basic ||
+ rangeKind == cir::ComplexRangeKind::Improved ||
+ rangeKind == cir::ComplexRangeKind::Promoted)
+ return algebraicResult;
+
+ assert(!cir::MissingFeatures::fastMathFlags());
+
+ // Check whether the real part and the imaginary part of the result are both
+ // NaN. If so, emit a library call to compute the multiplication instead.
+ // We check a value against NaN by comparing the value against itself.
+ mlir::Value resultRealIsNaN = builder.createIsNaN(loc, resultReal);
+ mlir::Value resultImagIsNaN = builder.createIsNaN(loc, resultImag);
+ mlir::Value resultRealAndImagAreNaN =
+ builder.createLogicalAnd(loc, resultRealIsNaN, resultImagIsNaN);
+
+ return builder
+ .create<cir::TernaryOp>(
+ loc, resultRealAndImagAreNaN,
+ [&](mlir::OpBuilder &, mlir::Location) {
+ mlir::Value libCallResult = buildComplexBinOpLibCall(
+ pass, builder, &getComplexMulLibCallName, loc, complexTy,
+ lhsReal, lhsImag, rhsReal, rhsImag);
+ builder.createYield(loc, libCallResult);
+ },
+ [&](mlir::OpBuilder &, mlir::Location) {
+ builder.createYield(loc, algebraicResult);
+ })
+ .getResult();
+}
+
+void LoweringPreparePass::lowerComplexMulOp(cir::ComplexMulOp op) {
+ cir::CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op);
+ mlir::Location loc = op.getLoc();
+ mlir::TypedValue<cir::ComplexType> lhs = op.getLhs();
+ mlir::TypedValue<cir::ComplexType> rhs = op.getRhs();
+ mlir::Value lhsReal = builder.createComplexReal(loc, lhs);
+ mlir::Value lhsImag = builder.createComplexImag(loc, lhs);
+ mlir::Value rhsReal = builder.createComplexReal(loc, rhs);
+ mlir::Value rhsImag = builder.createComplexImag(loc, rhs);
+ mlir::Value loweredResult = lowerComplexMul(*this, builder, loc, op, lhsReal,
+ lhsImag, rhsReal, rhsImag);
+ op.replaceAllUsesWith(loweredResult);
+ op.erase();
+}
+
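As a rough illustration of the emitted control flow (not the CIR builder code itself): the fast algebraic product is used unless both result parts are NaN, in which case a compiler-rt style helper — here a hypothetical stand-in for __mulsc3 — is called:

#include <cmath>

struct Cf { float re, im; };

// Hypothetical stand-in for compiler-rt's __mulsc3; declared only for
// illustration, not a real symbol defined here.
extern "C" Cf mulsc3_like(float a, float b, float c, float d);

Cf complexMulSketch(Cf lhs, Cf rhs) {
  // (a+bi) * (c+di) = (ac-bd) + (ad+bc)i -- the fast algebraic path.
  float re = lhs.re * rhs.re - lhs.im * rhs.im;
  float im = lhs.re * rhs.im + lhs.im * rhs.re;
  // Only if both parts are NaN is the (slower) helper consulted, which
  // handles the infinity cases the naive formula gets wrong.
  if (std::isnan(re) && std::isnan(im))
    return mulsc3_like(lhs.re, lhs.im, rhs.re, rhs.im);
  return Cf{re, im};
}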
void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
mlir::Type ty = op.getType();
if (!mlir::isa<cir::ComplexType>(ty))
@@ -155,7 +298,8 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
case cir::UnaryOpKind::Plus:
case cir::UnaryOpKind::Minus:
- llvm_unreachable("Complex unary Plus/Minus NYI");
+ resultReal = builder.createUnaryOp(loc, opKind, operandReal);
+ resultImag = builder.createUnaryOp(loc, opKind, operandImag);
break;
case cir::UnaryOpKind::Not:
@@ -268,18 +412,22 @@ void LoweringPreparePass::runOnOp(mlir::Operation *op) {
lowerArrayDtor(arrayDtor);
else if (auto cast = mlir::dyn_cast<cir::CastOp>(op))
lowerCastOp(cast);
+ else if (auto complexMul = mlir::dyn_cast<cir::ComplexMulOp>(op))
+ lowerComplexMulOp(complexMul);
else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
lowerUnaryOp(unary);
}
void LoweringPreparePass::runOnOperation() {
mlir::Operation *op = getOperation();
+ if (isa<::mlir::ModuleOp>(op))
+ mlirModule = cast<::mlir::ModuleOp>(op);
llvm::SmallVector<mlir::Operation *> opsToTransform;
op->walk([&](mlir::Operation *op) {
- if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp, cir::UnaryOp>(
- op))
+ if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp,
+ cir::ComplexMulOp, cir::UnaryOp>(op))
opsToTransform.push_back(op);
});
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 3f784fc..e1f7ea0 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1148,7 +1148,8 @@ llvm::Value *CodeGenFunction::emitCountedByPointerSize(
assert(E->getCastKind() == CK_LValueToRValue &&
"must be an LValue to RValue cast");
- const MemberExpr *ME = dyn_cast<MemberExpr>(E->getSubExpr());
+ const MemberExpr *ME =
+ dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
if (!ME)
return nullptr;
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 7a69b5d..1ce834d 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -643,16 +643,7 @@ unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
}
StringRef CGDebugInfo::getCurrentDirname() {
- if (!CGM.getCodeGenOpts().DebugCompilationDir.empty())
- return CGM.getCodeGenOpts().DebugCompilationDir;
-
- if (!CWDName.empty())
- return CWDName;
- llvm::ErrorOr<std::string> CWD =
- CGM.getFileSystem()->getCurrentWorkingDirectory();
- if (!CWD)
- return StringRef();
- return CWDName = internString(*CWD);
+ return CGM.getCodeGenOpts().DebugCompilationDir;
}
void CGDebugInfo::CreateCompileUnit() {
@@ -3248,6 +3239,9 @@ llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod,
std::string Remapped = remapDIPath(Path);
StringRef Relative(Remapped);
StringRef CompDir = TheCU->getDirectory();
+ if (CompDir.empty())
+ return Remapped;
+
if (Relative.consume_front(CompDir))
Relative.consume_front(llvm::sys::path::get_separator());
@@ -4807,7 +4801,7 @@ void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
const FunctionDecl *CalleeDecl) {
if (!CallOrInvoke)
return;
- auto *Func = CallOrInvoke->getCalledFunction();
+ auto *Func = dyn_cast<llvm::Function>(CallOrInvoke->getCalledOperand());
if (!Func)
return;
if (Func->getSubprogram())
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
index 411b2e7..497d3a6 100644
--- a/clang/lib/CodeGen/CGDebugInfo.h
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -158,7 +158,6 @@ class CGDebugInfo {
/// This is a storage for names that are constructed on demand. For
/// example, C++ destructors, C++ operators etc..
llvm::BumpPtrAllocator DebugInfoNames;
- StringRef CWDName;
llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index e25b694..04c9192 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -2278,8 +2278,12 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(const OMPRequiresDecl *D) {
case OffloadArch::SM_100a:
case OffloadArch::SM_101:
case OffloadArch::SM_101a:
+ case OffloadArch::SM_103:
+ case OffloadArch::SM_103a:
case OffloadArch::SM_120:
case OffloadArch::SM_120a:
+ case OffloadArch::SM_121:
+ case OffloadArch::SM_121a:
case OffloadArch::GFX600:
case OffloadArch::GFX601:
case OffloadArch::GFX602:
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
index 2c0767f..dc54c97 100644
--- a/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -978,7 +978,7 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessor());
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
- CI, BA, &CI.getVirtualFileSystem(), *VMContext, std::move(LinkModules),
+ CI, BA, CI.getVirtualFileSystemPtr(), *VMContext, std::move(LinkModules),
InFile, std::move(OS), CoverageInfo));
BEConsumer = Result.get();
@@ -1156,7 +1156,7 @@ void CodeGenAction::ExecuteAction() {
// Set clang diagnostic handler. To do this we need to create a fake
// BackendConsumer.
- BackendConsumer Result(CI, BA, &CI.getVirtualFileSystem(), *VMContext,
+ BackendConsumer Result(CI, BA, CI.getVirtualFileSystemPtr(), *VMContext,
std::move(LinkModules), "", nullptr, nullptr,
TheModule.get());
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 4aafac3..38aaceb 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -2449,12 +2449,7 @@ CoverageMappingModuleGen::CoverageMappingModuleGen(
: CGM(CGM), SourceInfo(SourceInfo) {}
std::string CoverageMappingModuleGen::getCurrentDirname() {
- if (!CGM.getCodeGenOpts().CoverageCompilationDir.empty())
- return CGM.getCodeGenOpts().CoverageCompilationDir;
-
- SmallString<256> CWD;
- llvm::sys::fs::current_path(CWD);
- return CWD.str().str();
+ return CGM.getCodeGenOpts().CoverageCompilationDir;
}
std::string CoverageMappingModuleGen::normalizeFilename(StringRef Filename) {
diff --git a/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp b/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp
index 95971e5..074f2a5 100644
--- a/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp
+++ b/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp
@@ -146,7 +146,7 @@ public:
: CI(CI), Diags(CI.getDiagnostics()), MainFileName(MainFileName),
OutputFileName(OutputFileName), Ctx(nullptr),
MMap(CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()),
- FS(&CI.getVirtualFileSystem()),
+ FS(CI.getVirtualFileSystemPtr()),
HeaderSearchOpts(CI.getHeaderSearchOpts()),
PreprocessorOpts(CI.getPreprocessorOpts()),
TargetOpts(CI.getTargetOpts()), LangOpts(CI.getLangOpts()),
diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp
index 6d0f042..fb2a79a 100644
--- a/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -563,9 +563,8 @@ CrossTranslationUnitContext::ASTLoader::loadFromDump(StringRef ASTDumpPath) {
auto DiagOpts = std::make_shared<DiagnosticOptions>();
TextDiagnosticPrinter *DiagClient =
new TextDiagnosticPrinter(llvm::errs(), *DiagOpts);
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, *DiagOpts, DiagClient));
+ auto Diags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(
+ DiagnosticIDs::create(), *DiagOpts, DiagClient);
return ASTUnit::LoadFromASTFile(
ASTDumpPath, CI.getPCHContainerOperations()->getRawReader(),
ASTUnit::LoadEverything, DiagOpts, Diags, CI.getFileSystemOpts(),
@@ -607,8 +606,8 @@ CrossTranslationUnitContext::ASTLoader::loadFromSource(
auto *DiagClient = new ForwardingDiagnosticConsumer{CI.getDiagnosticClient()};
IntrusiveRefCntPtr<DiagnosticIDs> DiagID{
CI.getDiagnostics().getDiagnosticIDs()};
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine{DiagID, *DiagOpts, DiagClient});
+ auto Diags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(DiagID, *DiagOpts,
+ DiagClient);
return ASTUnit::LoadFromCommandLine(
CommandLineArgs.begin(), (CommandLineArgs.end()),
diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp
index 207150e..25a16fe 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -671,7 +671,8 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
CmdArgs.push_back("--start-group");
AddRunTimeLibs(TC, D, CmdArgs, Args);
- CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_nolibc))
+ CmdArgs.push_back("-lc");
if (TC.hasValidGCCInstallation() || detectGCCToolchainAdjacent(D))
CmdArgs.push_back("-lgloss");
CmdArgs.push_back("--end-group");
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index f4674a5..4e1b1d9 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -226,17 +226,19 @@ static bool ShouldEnableAutolink(const ArgList &Args, const ToolChain &TC,
static const char *addDebugCompDirArg(const ArgList &Args,
ArgStringList &CmdArgs,
const llvm::vfs::FileSystem &VFS) {
+ std::string DebugCompDir;
if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
- options::OPT_fdebug_compilation_dir_EQ)) {
- if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
- CmdArgs.push_back(Args.MakeArgString(Twine("-fdebug-compilation-dir=") +
- A->getValue()));
+ options::OPT_fdebug_compilation_dir_EQ))
+ DebugCompDir = A->getValue();
+
+ if (DebugCompDir.empty()) {
+ if (llvm::ErrorOr<std::string> CWD = VFS.getCurrentWorkingDirectory())
+ DebugCompDir = std::move(*CWD);
else
- A->render(Args, CmdArgs);
- } else if (llvm::ErrorOr<std::string> CWD =
- VFS.getCurrentWorkingDirectory()) {
- CmdArgs.push_back(Args.MakeArgString("-fdebug-compilation-dir=" + *CWD));
+ return nullptr;
}
+ CmdArgs.push_back(
+ Args.MakeArgString("-fdebug-compilation-dir=" + DebugCompDir));
StringRef Path(CmdArgs.back());
return Path.substr(Path.find('=') + 1).data();
}
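Condensed, the new logic is: prefer an explicit -f*-compilation-dir= value, otherwise fall back to the VFS working directory, otherwise emit no flag at all. A sketch with hypothetical helpers:

#include <optional>
#include <string>

// Hypothetical stand-ins for the driver pieces involved.
std::optional<std::string> explicitCompDir(); // last -f*-compilation-dir=
std::optional<std::string> virtualCWD();      // VFS working directory

// Mirrors addDebugCompDirArg above: the explicit flag wins, then the CWD;
// if neither exists, no -fdebug-compilation-dir= is emitted.
std::optional<std::string> debugCompDir() {
  if (std::optional<std::string> dir = explicitCompDir())
    return dir;
  return virtualCWD();
}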
@@ -525,17 +527,17 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fcoverage-mcdc");
}
+ StringRef CoverageCompDir;
if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
- options::OPT_fcoverage_compilation_dir_EQ)) {
- if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
- CmdArgs.push_back(Args.MakeArgString(
- Twine("-fcoverage-compilation-dir=") + A->getValue()));
- else
- A->render(Args, CmdArgs);
- } else if (llvm::ErrorOr<std::string> CWD =
- D.getVFS().getCurrentWorkingDirectory()) {
- CmdArgs.push_back(Args.MakeArgString("-fcoverage-compilation-dir=" + *CWD));
- }
+ options::OPT_fcoverage_compilation_dir_EQ))
+ CoverageCompDir = A->getValue();
+ if (CoverageCompDir.empty()) {
+ if (auto CWD = D.getVFS().getCurrentWorkingDirectory())
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fcoverage-compilation-dir=") + *CWD));
+ } else
+ CmdArgs.push_back(Args.MakeArgString(Twine("-fcoverage-compilation-dir=") +
+ CoverageCompDir));
if (Args.hasArg(options::OPT_fprofile_exclude_files_EQ)) {
auto *Arg = Args.getLastArg(options::OPT_fprofile_exclude_files_EQ);
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index 7d803be..1f0b478 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -88,6 +88,8 @@ CudaVersion getCudaVersion(uint32_t raw_version) {
return CudaVersion::CUDA_126;
if (raw_version < 12090)
return CudaVersion::CUDA_128;
+ if (raw_version < 13000)
+ return CudaVersion::CUDA_129;
return CudaVersion::NEW;
}
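The mapping is by exclusive upper bound, so raw versions in [12090, 13000) — i.e. 12.9.x — now resolve to CUDA_129 rather than falling through to NEW. A trimmed sketch:

#include <cstdint>

enum class Ver { CUDA_128, CUDA_129, NEW };

// Earlier buckets elided; each raw version maps to the first bound it
// falls below.
Ver bucket(uint32_t raw) {
  if (raw < 12090)
    return Ver::CUDA_128;
  if (raw < 13000)
    return Ver::CUDA_129; // the newly added 12.9 bucket
  return Ver::NEW;
}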
@@ -683,6 +685,7 @@ void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case CudaVersion::CUDA_##CUDA_VER: \
PtxFeature = "+ptx" #PTX_VER; \
break;
+ CASE_CUDA_VERSION(129, 88);
CASE_CUDA_VERSION(128, 87);
CASE_CUDA_VERSION(126, 85);
CASE_CUDA_VERSION(125, 85);
diff --git a/clang/lib/Frontend/ASTMerge.cpp b/clang/lib/Frontend/ASTMerge.cpp
index a4ce883..10c1045 100644
--- a/clang/lib/Frontend/ASTMerge.cpp
+++ b/clang/lib/Frontend/ASTMerge.cpp
@@ -41,10 +41,10 @@ void ASTMergeAction::ExecuteAction() {
auto SharedState = std::make_shared<ASTImporterSharedState>(
*CI.getASTContext().getTranslationUnitDecl());
for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(new DiagnosticsEngine(
+ auto Diags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(
DiagIDs, CI.getDiagnosticOpts(),
new ForwardingDiagnosticConsumer(*CI.getDiagnostics().getClient()),
- /*ShouldOwnClient=*/true));
+ /*ShouldOwnClient=*/true);
std::unique_ptr<ASTUnit> Unit = ASTUnit::LoadFromASTFile(
ASTFiles[I], CI.getPCHContainerReader(), ASTUnit::LoadEverything,
nullptr, Diags, CI.getFileSystemOpts(), CI.getHeaderSearchOpts());
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index 67ed17b..5711f45 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -1204,7 +1204,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
- Clang->setDiagnostics(&getDiagnostics());
+ Clang->setDiagnostics(getDiagnosticsPtr());
// Create the target instance.
if (!Clang->createTarget())
@@ -1424,7 +1424,7 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
PreambleInvocationIn.getFrontendOpts().SkipFunctionBodies = true;
llvm::ErrorOr<PrecompiledPreamble> NewPreamble = PrecompiledPreamble::Build(
- PreambleInvocationIn, MainFileBuffer.get(), Bounds, *Diagnostics, VFS,
+ PreambleInvocationIn, MainFileBuffer.get(), Bounds, Diagnostics, VFS,
PCHContainerOps, StorePreamblesInMemory, PreambleStoragePath,
Callbacks);
@@ -1624,7 +1624,7 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
// Set up diagnostics, capturing any diagnostics that would
// otherwise be dropped.
- Clang->setDiagnostics(&AST->getDiagnostics());
+ Clang->setDiagnostics(AST->getDiagnosticsPtr());
// Create the target instance.
if (!Clang->createTarget())
@@ -1773,7 +1773,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps),
PrecompilePreambleAfterNParses,
- &AST->FileMgr->getVirtualFileSystem()))
+ AST->FileMgr->getVirtualFileSystemPtr()))
return nullptr;
return AST;
}
@@ -1895,7 +1895,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
if (!VFS) {
assert(FileMgr && "FileMgr is null on Reparse call");
- VFS = &FileMgr->getVirtualFileSystem();
+ VFS = FileMgr->getVirtualFileSystemPtr();
}
clearFileLevelDecls();
@@ -2209,8 +2209,9 @@ void ASTUnit::CodeComplete(
bool IncludeCodePatterns, bool IncludeBriefComments,
CodeCompleteConsumer &Consumer,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- DiagnosticsEngine &Diag, LangOptions &LangOpts, SourceManager &SourceMgr,
- FileManager &FileMgr, SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
+ llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diag, LangOptions &LangOpts,
+ SourceManager &SourceMgr, FileManager &FileMgr,
+ SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers,
std::unique_ptr<SyntaxOnlyAction> Act) {
if (!Invocation)
@@ -2259,11 +2260,11 @@ void ASTUnit::CodeComplete(
std::string(Clang->getFrontendOpts().Inputs[0].getFile());
// Set up diagnostics, capturing any diagnostics produced.
- Clang->setDiagnostics(&Diag);
+ Clang->setDiagnostics(Diag);
CaptureDroppedDiagnostics Capture(CaptureDiagsKind::All,
Clang->getDiagnostics(),
&StoredDiagnostics, nullptr);
- ProcessWarningOptions(Diag, Inv.getDiagnosticOpts(),
+ ProcessWarningOptions(*Diag, Inv.getDiagnosticOpts(),
FileMgr.getVirtualFileSystem());
// Create the target instance.
@@ -2321,7 +2322,8 @@ void ASTUnit::CodeComplete(
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (Preamble && Line > 1 && hasSameUniqueID(File, OriginalSourceFile)) {
OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
- PCHContainerOps, Inv, &FileMgr.getVirtualFileSystem(), false, Line - 1);
+ PCHContainerOps, Inv, FileMgr.getVirtualFileSystemPtr(), false,
+ Line - 1);
}
// If the main file has been overridden due to the use of a preamble,
@@ -2331,7 +2333,7 @@ void ASTUnit::CodeComplete(
"No preamble was built, but OverrideMainBuffer is not null");
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
- &FileMgr.getVirtualFileSystem();
+ FileMgr.getVirtualFileSystemPtr();
Preamble->AddImplicitPreamble(Clang->getInvocation(), VFS,
OverrideMainBuffer.get());
// FIXME: there is no way to update VFS if it was changed by
diff --git a/clang/lib/Frontend/ChainedIncludesSource.cpp b/clang/lib/Frontend/ChainedIncludesSource.cpp
index ba7c767..88b1076 100644
--- a/clang/lib/Frontend/ChainedIncludesSource.cpp
+++ b/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -118,13 +118,12 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
TextDiagnosticPrinter *DiagClient =
new TextDiagnosticPrinter(llvm::errs(), CI.getDiagnosticOpts());
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, CI.getDiagnosticOpts(), DiagClient));
+ auto Diags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(
+ DiagnosticIDs::create(), CI.getDiagnosticOpts(), DiagClient);
auto Clang = std::make_unique<CompilerInstance>(
std::move(CInvok), CI.getPCHContainerOperations());
- Clang->setDiagnostics(Diags.get());
+ Clang->setDiagnostics(Diags);
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().getTargetOpts()));
Clang->createFileManager();
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index c7b82db..ed6a651 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -87,8 +87,9 @@ bool CompilerInstance::shouldBuildGlobalModuleIndex() const {
!DisableGeneratingGlobalModuleIndex;
}
-void CompilerInstance::setDiagnostics(DiagnosticsEngine *Value) {
- Diagnostics = Value;
+void CompilerInstance::setDiagnostics(
+ llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Value) {
+ Diagnostics = std::move(Value);
}
void CompilerInstance::setVerboseOutputStream(raw_ostream &Value) {
@@ -160,6 +161,11 @@ llvm::vfs::FileSystem &CompilerInstance::getVirtualFileSystem() const {
return getFileManager().getVirtualFileSystem();
}
+llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
+CompilerInstance::getVirtualFileSystemPtr() const {
+ return getFileManager().getVirtualFileSystemPtr();
+}
+
void CompilerInstance::setFileManager(FileManager *Value) {
FileMgr = Value;
}
@@ -340,9 +346,8 @@ IntrusiveRefCntPtr<DiagnosticsEngine> CompilerInstance::createDiagnostics(
llvm::vfs::FileSystem &VFS, DiagnosticOptions &Opts,
DiagnosticConsumer *Client, bool ShouldOwnClient,
const CodeGenOptions *CodeGenOpts) {
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagID, Opts));
+ auto Diags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(
+ DiagnosticIDs::create(), Opts);
// Create the diagnostic client for reporting errors or for
// implementing -verify.
@@ -375,7 +380,7 @@ IntrusiveRefCntPtr<DiagnosticsEngine> CompilerInstance::createDiagnostics(
FileManager *CompilerInstance::createFileManager(
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!VFS)
- VFS = FileMgr ? &FileMgr->getVirtualFileSystem()
+ VFS = FileMgr ? FileMgr->getVirtualFileSystemPtr()
: createVFSFromCompilerInvocation(getInvocation(),
getDiagnostics());
assert(VFS && "FileManager has no VFS?");
@@ -1218,7 +1223,7 @@ std::unique_ptr<CompilerInstance> CompilerInstance::cloneForModuleCompileImpl(
} else if (FrontendOpts.ModulesShareFileManager) {
Instance.setFileManager(&getFileManager());
} else {
- Instance.createFileManager(&getVirtualFileSystem());
+ Instance.createFileManager(getVirtualFileSystemPtr());
}
if (ThreadSafeConfig) {
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index ab4384a..9f77e62 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -826,7 +826,7 @@ static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
// Setup a dummy DiagnosticsEngine.
DiagnosticOptions DummyDiagOpts;
- DiagnosticsEngine DummyDiags(new DiagnosticIDs(), DummyDiagOpts);
+ DiagnosticsEngine DummyDiags(DiagnosticIDs::create(), DummyDiagOpts);
DummyDiags.setClient(new TextDiagnosticBuffer());
// Run the first parse on the original arguments with the dummy invocation and
@@ -2685,7 +2685,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
std::optional<DiagnosticsEngine> IgnoringDiags;
if (!Diags) {
IgnoringDiagOpts.emplace();
- IgnoringDiags.emplace(new DiagnosticIDs(), *IgnoringDiagOpts,
+ IgnoringDiags.emplace(DiagnosticIDs::create(), *IgnoringDiagOpts,
new IgnoringDiagConsumer());
Diags = &*IgnoringDiags;
}
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 1d82fc7..12b5c18 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -226,6 +226,8 @@ private:
}
*OS << " ]\n";
*OS << "}\n";
+
+ OS->flush();
}
};
@@ -763,11 +765,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// If we're replaying the build of an AST file, import it and set up
// the initial state from its build.
if (ReplayASTFile) {
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags = CI.getDiagnosticsPtr();
// The AST unit populates its own diagnostics engine rather than ours.
- IntrusiveRefCntPtr<DiagnosticsEngine> ASTDiags(new DiagnosticsEngine(
- Diags->getDiagnosticIDs(), Diags->getDiagnosticOptions()));
+ auto ASTDiags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(
+ Diags->getDiagnosticIDs(), Diags->getDiagnosticOptions());
ASTDiags->setClient(Diags->getClient(), /*OwnsClient*/false);
// FIXME: What if the input is a memory buffer?
@@ -835,7 +837,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
assert(hasASTFileSupport() &&
"This action does not have AST file support!");
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags = CI.getDiagnosticsPtr();
// FIXME: What if the input is a memory buffer?
StringRef InputFile = Input.getFile();
diff --git a/clang/lib/Frontend/PrecompiledPreamble.cpp b/clang/lib/Frontend/PrecompiledPreamble.cpp
index 146cf90..7fc1d87 100644
--- a/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -57,11 +57,9 @@ createVFSOverlayForPreamblePCH(StringRef PCHFilename,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
// We want only the PCH file from the real filesystem to be available,
// so we create an in-memory VFS with just that and overlay it on top.
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> PCHFS(
- new llvm::vfs::InMemoryFileSystem());
+ auto PCHFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
PCHFS->addFile(PCHFilename, 0, std::move(PCHBuffer));
- IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> Overlay(
- new llvm::vfs::OverlayFileSystem(VFS));
+ auto Overlay = llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(VFS);
Overlay->pushOverlay(PCHFS);
return Overlay;
}
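The makeIntrusiveRefCnt pattern used here generalizes; a minimal sketch of building a one-file in-memory overlay with the llvm::vfs API (hypothetical helper name):

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <memory>

// Overlay a single in-memory file on top of `base`, mirroring the
// createVFSOverlayForPreamblePCH pattern above.
llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
overlayOneFile(llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> base,
               llvm::StringRef path,
               std::unique_ptr<llvm::MemoryBuffer> buf) {
  auto mem = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
  mem->addFile(path, /*ModificationTime=*/0, std::move(buf));
  auto overlay =
      llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(std::move(base));
  overlay->pushOverlay(std::move(mem));
  return overlay;
}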
@@ -414,7 +412,7 @@ PrecompiledPreamble::operator=(PrecompiledPreamble &&) = default;
llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer, PreambleBounds Bounds,
- DiagnosticsEngine &Diagnostics,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps, bool StoreInMemory,
StringRef StoragePath, PreambleCallbacks &Callbacks) {
@@ -463,7 +461,7 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance> CICleanup(
Clang.get());
- Clang->setDiagnostics(&Diagnostics);
+ Clang->setDiagnostics(Diagnostics);
// Create the target instance.
if (!Clang->createTarget())
@@ -478,18 +476,18 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
}
// Clear out old caches and data.
- Diagnostics.Reset();
- ProcessWarningOptions(Diagnostics, Clang->getDiagnosticOpts(), *VFS);
+ Diagnostics->Reset();
+ ProcessWarningOptions(*Diagnostics, Clang->getDiagnosticOpts(), *VFS);
- VFS =
- createVFSFromCompilerInvocation(Clang->getInvocation(), Diagnostics, VFS);
+ VFS = createVFSFromCompilerInvocation(Clang->getInvocation(), *Diagnostics,
+ VFS);
// Create a file manager object to provide access to and cache the filesystem.
Clang->setFileManager(new FileManager(Clang->getFileSystemOpts(), VFS));
// Create the source manager.
Clang->setSourceManager(
- new SourceManager(Diagnostics, Clang->getFileManager()));
+ new SourceManager(*Diagnostics, Clang->getFileManager()));
auto PreambleDepCollector = std::make_shared<PreambleDependencyCollector>();
Clang->addDependencyCollector(PreambleDepCollector);
diff --git a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 23a1f90..1f5bb47 100644
--- a/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -753,10 +753,9 @@ DiagnosticsEngine *SDiagsWriter::getMetaDiags() {
// to be distinct from the engine the writer was being added to and would
// normally not be used.
if (!State->MetaDiagnostics) {
- IntrusiveRefCntPtr<DiagnosticIDs> IDs(new DiagnosticIDs());
auto Client = new TextDiagnosticPrinter(llvm::errs(), State->DiagOpts);
- State->MetaDiagnostics =
- std::make_unique<DiagnosticsEngine>(IDs, State->DiagOpts, Client);
+ State->MetaDiagnostics = std::make_unique<DiagnosticsEngine>(
+ DiagnosticIDs::create(), State->DiagOpts, Client);
}
return State->MetaDiagnostics.get();
}
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h
index e8ccccb..c877234 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h
@@ -12,7 +12,7 @@
namespace hlsl {
namespace __detail {
-constexpr vector<uint, 4> d3d_color_to_ubyte4_impl(vector<float, 4> V) {
+constexpr int4 d3d_color_to_ubyte4_impl(float4 V) {
// Use the same scaling factor used by FXC, and DXC for DXIL
// (i.e., 255.001953)
// https://github.com/microsoft/DirectXShaderCompiler/blob/070d0d5a2beacef9eeb51037a9b04665716fd6f3/lib/HLSL/HLOperationLower.cpp#L666C1-L697C2
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 499a053..d9d87c8 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -418,7 +418,7 @@ const inline float4 lit(float NDotL, float NDotH, float M) {
/// This function swizzles and scales components of the \a x parameter. Use this
/// function to compensate for the lack of UBYTE4 support in some hardware.
-constexpr vector<uint, 4> D3DCOLORtoUBYTE4(vector<float, 4> V) {
+constexpr int4 D3DCOLORtoUBYTE4(float4 V) {
return __detail::d3d_color_to_ubyte4_impl(V);
}
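Numerically, each component is scaled by the FXC/DXC factor before truncation (the real intrinsic also swizzles components; this sketch shows only the scale):

#include <cstdint>

// Scale a [0,1] color component to a byte value the way FXC/DXC do,
// using 255.001953 rather than 255.0 so that 1.0f still truncates to 255.
int32_t scaleColorComponent(float v) {
  return static_cast<int32_t>(v * 255.001953f);
}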
diff --git a/clang/lib/Interpreter/CodeCompletion.cpp b/clang/lib/Interpreter/CodeCompletion.cpp
index dac3888..ecdf489 100644
--- a/clang/lib/Interpreter/CodeCompletion.cpp
+++ b/clang/lib/Interpreter/CodeCompletion.cpp
@@ -380,7 +380,7 @@ void ReplCodeCompleter::codeComplete(CompilerInstance *InterpCI,
AU->setOwnsRemappedFileBuffers(false);
AU->CodeComplete(CodeCompletionFileName, 1, Col, RemappedFiles, false, false,
false, consumer,
- std::make_shared<clang::PCHContainerOperations>(), *diag,
+ std::make_shared<clang::PCHContainerOperations>(), diag,
InterpCI->getLangOpts(), AU->getSourceManager(),
AU->getFileManager(), sd, tb, std::move(Act));
}
diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp
index 9b71486..5e5ae81 100644
--- a/clang/lib/Interpreter/Interpreter.cpp
+++ b/clang/lib/Interpreter/Interpreter.cpp
@@ -86,7 +86,6 @@ GetCC1Arguments(DiagnosticsEngine *Diagnostics,
static llvm::Expected<std::unique_ptr<CompilerInstance>>
CreateCI(const llvm::opt::ArgStringList &Argv) {
std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
// Register the support for object-file-wrapped Clang modules.
// FIXME: Clang should register these container operations automatically.
@@ -98,7 +97,7 @@ CreateCI(const llvm::opt::ArgStringList &Argv) {
// a well formed diagnostic object.
DiagnosticOptions DiagOpts;
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
- DiagnosticsEngine Diags(DiagID, DiagOpts, DiagsBuffer);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts, DiagsBuffer);
bool Success = CompilerInvocation::CreateFromArgs(
Clang->getInvocation(), llvm::ArrayRef(Argv.begin(), Argv.size()), Diags);
@@ -174,11 +173,10 @@ IncrementalCompilerBuilder::create(std::string TT,
// Buffer diagnostics from argument parsing so that we can output them using a
// well formed diagnostic object.
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
std::unique_ptr<DiagnosticOptions> DiagOpts =
CreateAndPopulateDiagOpts(ClangArgv);
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
- DiagnosticsEngine Diags(DiagID, *DiagOpts, DiagsBuffer);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), *DiagOpts, DiagsBuffer);
driver::Driver Driver(/*MainBinaryName=*/ClangArgv[0], TT, Diags);
Driver.setCheckInputsExist(false); // the input comes from mem buffers
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index c23c98a..8580de2 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -84,6 +84,16 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
return checkMovDPPFunctionCall(TheCall, 2, 1);
case AMDGPU::BI__builtin_amdgcn_update_dpp: {
return checkMovDPPFunctionCall(TheCall, 6, 2);
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp8:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp8:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_bf8:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_bf8:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp4:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp4:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp8:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_bf8:
+ case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp4:
+ return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 7);
}
default:
return false;
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index c74b671..bc87611 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -15893,9 +15893,7 @@ ExprResult Sema::BuiltinMatrixTranspose(CallExpr *TheCall,
// Get and verify the matrix dimensions.
static std::optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
- SourceLocation ErrorPos;
- std::optional<llvm::APSInt> Value =
- Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
+ std::optional<llvm::APSInt> Value = Expr->getIntegerConstantExpr(S.Context);
if (!Value) {
S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
<< Name;
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index a43ac9e..0de5580 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -4034,6 +4034,14 @@ static void AddOverloadParameterChunks(
return;
}
+ // C++23 introduces an explicit object parameter, a.k.a. "deducing this".
+ // Skip it for code completion and treat the next parameter as the first
+ // parameter.
+ if (Function && FirstParameter &&
+ Function->getParamDecl(P)->isExplicitObjectParameter()) {
+ continue;
+ }
+
if (FirstParameter)
FirstParameter = false;
else
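The construct being skipped, for reference:

struct Widget {
  // C++23 explicit object parameter ("deducing this"): when completing
  // a call to resize, only `int newSize` should be offered.
  void resize(this Widget &self, int newSize);
};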
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index c7e7507..d255c11 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -3063,7 +3063,8 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
// error since the definition will have already been created without
// the semantic effects of the attribute having been applied.
S.Diag(NewAttribute->getLocation(),
- diag::err_sycl_entry_point_after_definition);
+ diag::err_sycl_entry_point_after_definition)
+ << NewAttribute;
S.Diag(Def->getLocation(), diag::note_previous_definition);
cast<SYCLKernelEntryPointAttr>(NewAttribute)->setInvalidAttr();
++I;
@@ -16258,19 +16259,19 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
FD->getAttr<SYCLKernelEntryPointAttr>();
if (FD->isDefaulted()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*defaulted function*/ 3;
+ << SKEPAttr << /*defaulted function*/ 3;
SKEPAttr->setInvalidAttr();
} else if (FD->isDeleted()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*deleted function*/ 2;
+ << SKEPAttr << /*deleted function*/ 2;
SKEPAttr->setInvalidAttr();
} else if (FSI->isCoroutine()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*coroutine*/ 7;
+ << SKEPAttr << /*coroutine*/ 7;
SKEPAttr->setInvalidAttr();
} else if (Body && isa<CXXTryStmt>(Body)) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*function defined with a function try block*/ 8;
+ << SKEPAttr << /*function defined with a function try block*/ 8;
SKEPAttr->setInvalidAttr();
}
diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp
index 3e03cb4..4683c81 100644
--- a/clang/lib/Sema/SemaSYCL.cpp
+++ b/clang/lib/Sema/SemaSYCL.cpp
@@ -262,12 +262,13 @@ void SemaSYCL::CheckSYCLEntryPointFunctionDecl(FunctionDecl *FD) {
if (!getASTContext().hasSameType(SAI->getKernelName(),
SKEPAttr->getKernelName())) {
Diag(SAI->getLocation(), diag::err_sycl_entry_point_invalid_redeclaration)
- << SAI->getKernelName() << SKEPAttr->getKernelName();
+ << SKEPAttr << SAI->getKernelName() << SKEPAttr->getKernelName();
Diag(SKEPAttr->getLocation(), diag::note_previous_attribute);
SAI->setInvalidAttr();
} else {
Diag(SAI->getLocation(),
- diag::warn_sycl_entry_point_redundant_declaration);
+ diag::warn_sycl_entry_point_redundant_declaration)
+ << SAI;
Diag(SKEPAttr->getLocation(), diag::note_previous_attribute);
}
}
@@ -289,7 +290,8 @@ void SemaSYCL::CheckSYCLEntryPointFunctionDecl(FunctionDecl *FD) {
PrevSKEPAttr->getKernelName())) {
Diag(SKEPAttr->getLocation(),
diag::err_sycl_entry_point_invalid_redeclaration)
- << SKEPAttr->getKernelName() << PrevSKEPAttr->getKernelName();
+ << SKEPAttr << SKEPAttr->getKernelName()
+ << PrevSKEPAttr->getKernelName();
Diag(PrevSKEPAttr->getLocation(), diag::note_previous_decl) << PrevFD;
SKEPAttr->setInvalidAttr();
}
@@ -299,50 +301,52 @@ void SemaSYCL::CheckSYCLEntryPointFunctionDecl(FunctionDecl *FD) {
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (!MD->isStatic()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*non-static member function*/ 0;
+ << SKEPAttr << /*non-static member function*/ 0;
SKEPAttr->setInvalidAttr();
}
}
if (FD->isVariadic()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*variadic function*/ 1;
+ << SKEPAttr << /*variadic function*/ 1;
SKEPAttr->setInvalidAttr();
}
if (FD->isDefaulted()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*defaulted function*/ 3;
+ << SKEPAttr << /*defaulted function*/ 3;
SKEPAttr->setInvalidAttr();
} else if (FD->isDeleted()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*deleted function*/ 2;
+ << SKEPAttr << /*deleted function*/ 2;
SKEPAttr->setInvalidAttr();
}
if (FD->isConsteval()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*consteval function*/ 5;
+ << SKEPAttr << /*consteval function*/ 5;
SKEPAttr->setInvalidAttr();
} else if (FD->isConstexpr()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*constexpr function*/ 4;
+ << SKEPAttr << /*constexpr function*/ 4;
SKEPAttr->setInvalidAttr();
}
if (FD->isNoReturn()) {
Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_invalid)
- << /*function declared with the 'noreturn' attribute*/ 6;
+ << SKEPAttr << /*function declared with the 'noreturn' attribute*/ 6;
SKEPAttr->setInvalidAttr();
}
if (FD->getReturnType()->isUndeducedType()) {
Diag(SKEPAttr->getLocation(),
- diag::err_sycl_entry_point_deduced_return_type);
+ diag::err_sycl_entry_point_deduced_return_type)
+ << SKEPAttr;
SKEPAttr->setInvalidAttr();
} else if (!FD->getReturnType()->isDependentType() &&
!FD->getReturnType()->isVoidType()) {
- Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_return_type);
+ Diag(SKEPAttr->getLocation(), diag::err_sycl_entry_point_return_type)
+ << SKEPAttr;
SKEPAttr->setInvalidAttr();
}
@@ -354,7 +358,8 @@ void SemaSYCL::CheckSYCLEntryPointFunctionDecl(FunctionDecl *FD) {
if (!declaresSameEntity(FD, SKI->getKernelEntryPointDecl())) {
// FIXME: This diagnostic should include the origin of the kernel
// FIXME: names; not just the locations of the conflicting declarations.
- Diag(FD->getLocation(), diag::err_sycl_kernel_name_conflict);
+ Diag(FD->getLocation(), diag::err_sycl_kernel_name_conflict)
+ << SKEPAttr;
Diag(SKI->getKernelEntryPointDecl()->getLocation(),
diag::note_previous_declaration);
SKEPAttr->setInvalidAttr();
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index f896f9f1..682d263 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -677,8 +677,7 @@ bool PCHValidator::ReadDiagnosticOptions(DiagnosticOptions &DiagOpts,
bool Complain) {
DiagnosticsEngine &ExistingDiags = PP.getDiagnostics();
IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(ExistingDiags.getDiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
- new DiagnosticsEngine(DiagIDs, DiagOpts));
+ auto Diags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(DiagIDs, DiagOpts);
// This should never fail, because we would have processed these options
// before writing them to an ASTFile.
ProcessWarningOptions(*Diags, DiagOpts,
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index bd84a97..cdaf38d 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -1147,7 +1147,8 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
const SYCLKernelInfo *SKI = C.findSYCLKernelInfo(SKEPAttr->getKernelName());
if (SKI) {
if (!declaresSameEntity(FD, SKI->getKernelEntryPointDecl())) {
- Reader.Diag(FD->getLocation(), diag::err_sycl_kernel_name_conflict);
+ Reader.Diag(FD->getLocation(), diag::err_sycl_kernel_name_conflict)
+ << SKEPAttr;
Reader.Diag(SKI->getKernelEntryPointDecl()->getLocation(),
diag::note_previous_declaration);
SKEPAttr->setInvalidAttr();
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 8535384..fe70558 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -1227,7 +1227,7 @@ void ExprEngine::VisitAttributedStmt(const AttributedStmt *A,
for (const auto *Attr : getSpecificAttrs<CXXAssumeAttr>(A->getAttrs())) {
for (ExplodedNode *N : CheckerPreStmt) {
- Visit(Attr->getAssumption(), N, EvalSet);
+ Visit(Attr->getAssumption()->IgnoreParens(), N, EvalSet);
}
}
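The case the IgnoreParens() call handles — an assumption wrapped in parentheses:

void positive(int x) {
  // The analyzer now strips the extra parentheses and still constrains
  // x > 0 on the paths that follow.
  [[assume((x > 0))]];
}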
diff --git a/clang/lib/Tooling/CompilationDatabase.cpp b/clang/lib/Tooling/CompilationDatabase.cpp
index d5fc216..860457a 100644
--- a/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/clang/lib/Tooling/CompilationDatabase.cpp
@@ -241,9 +241,8 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
llvm::raw_string_ostream Output(ErrorMsg);
TextDiagnosticPrinter DiagnosticPrinter(Output, DiagOpts);
UnusedInputDiagConsumer DiagClient(DiagnosticPrinter);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), DiagOpts,
- &DiagClient, false);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts, &DiagClient,
+ false);
// The clang executable path isn't required since the jobs the driver builds
// will not be executed.
diff --git a/clang/lib/Tooling/Core/Replacement.cpp b/clang/lib/Tooling/Core/Replacement.cpp
index 1506218..10bdc22 100644
--- a/clang/lib/Tooling/Core/Replacement.cpp
+++ b/clang/lib/Tooling/Core/Replacement.cpp
@@ -581,12 +581,11 @@ llvm::Expected<std::string> applyAllReplacements(StringRef Code,
if (Replaces.empty())
return Code.str();
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs), DiagOpts);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts);
SourceManager SourceMgr(Diagnostics, Files);
Rewriter Rewrite(SourceMgr, LangOptions());
InMemoryFileSystem->addFile(
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 8ce2706..b2b61de7 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -605,8 +605,8 @@ DependencyScanningWorker::DependencyScanningWorker(
switch (Service.getMode()) {
case ScanningMode::DependencyDirectivesScan:
- DepFS =
- new DependencyScanningWorkerFilesystem(Service.getSharedCache(), FS);
+ DepFS = llvm::makeIntrusiveRefCnt<DependencyScanningWorkerFilesystem>(
+ Service.getSharedCache(), FS);
BaseFS = DepFS;
break;
case ScanningMode::CanonicalPreprocessing:
diff --git a/clang/lib/Tooling/Refactoring.cpp b/clang/lib/Tooling/Refactoring.cpp
index d2b0b37..9b1af237 100644
--- a/clang/lib/Tooling/Refactoring.cpp
+++ b/clang/lib/Tooling/Refactoring.cpp
@@ -39,9 +39,8 @@ int RefactoringTool::runAndSave(FrontendActionFactory *ActionFactory) {
LangOptions DefaultLangOptions;
DiagnosticOptions DiagOpts;
TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(), DiagOpts);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), DiagOpts,
- &DiagnosticPrinter, false);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts,
+ &DiagnosticPrinter, false);
SourceManager Sources(Diagnostics, getFiles());
Rewriter Rewrite(Sources, DefaultLangOptions);
diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp
index 5333956..ecafe26 100644
--- a/clang/lib/Tooling/Tooling.cpp
+++ b/clang/lib/Tooling/Tooling.cpp
@@ -227,10 +227,11 @@ bool runToolOnCodeWithArgs(
const Twine &ToolName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
const FileContentMappings &VirtualMappedFiles) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
SmallString<1024> CodeStorage;
@@ -403,7 +404,7 @@ bool ToolInvocation::run() {
}
const std::unique_ptr<driver::Driver> Driver(
- newDriver(&*Diagnostics, BinaryName, &Files->getVirtualFileSystem()));
+ newDriver(&*Diagnostics, BinaryName, Files->getVirtualFileSystemPtr()));
// The "input file not found" diagnostics from the driver are useful.
// The driver is only aware of the VFS working directory, but some clients
// change this at the FileManager level instead.
@@ -473,8 +474,10 @@ ClangTool::ClangTool(const CompilationDatabase &Compilations,
IntrusiveRefCntPtr<FileManager> Files)
: Compilations(Compilations), SourcePaths(SourcePaths),
PCHContainerOps(std::move(PCHContainerOps)),
- OverlayFileSystem(new llvm::vfs::OverlayFileSystem(std::move(BaseFS))),
- InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
+ OverlayFileSystem(llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ std::move(BaseFS))),
+ InMemoryFileSystem(
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
Files(Files ? Files
: new FileManager(FileSystemOptions(), OverlayFileSystem)) {
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
@@ -692,10 +695,11 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
std::vector<std::unique_ptr<ASTUnit>> ASTs;
ASTBuilderAction Action(ASTs);
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(std::move(BaseFS)));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ std::move(BaseFS));
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
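The Tooling.cpp hunks above are all instances of the same mechanical cleanup: replace an explicit `new` wrapped in an IntrusiveRefCntPtr with llvm::makeIntrusiveRefCnt. A minimal sketch of the idiom, with a hypothetical `Widget` type standing in for the file systems the patch actually touches:

    #include "llvm/ADT/IntrusiveRefCntPtr.h"

    // `Widget` is hypothetical; the real patch applies this to
    // OverlayFileSystem, InMemoryFileSystem, and friends.
    struct Widget : llvm::RefCountedBase<Widget> {
      explicit Widget(int N) : N(N) {}
      int N;
    };

    void demo() {
      // Before: spell the type twice and use a bare `new`.
      llvm::IntrusiveRefCntPtr<Widget> Old(new Widget(42));
      // After: one mention of the type, no bare `new` (analogous to
      // std::make_shared).
      auto New = llvm::makeIntrusiveRefCnt<Widget>(42);
    }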
diff --git a/clang/test/AST/ByteCode/codegen.cpp b/clang/test/AST/ByteCode/codegen.cpp
index 6f9e75e..1bc756c 100644
--- a/clang/test/AST/ByteCode/codegen.cpp
+++ b/clang/test/AST/ByteCode/codegen.cpp
@@ -23,6 +23,10 @@ S s;
// CHECK: @sp = constant ptr getelementptr (i8, ptr @s, i64 16), align 8
float &sp = s.c[3];
+namespace NearlyZeroInit {
+ // CHECK: @_ZN14NearlyZeroInit1bE ={{.*}} global{{.*}} { i32, <{ i32, [2147483647 x i32] }> } { i32 1, <{ i32, [2147483647 x i32] }> <{ i32 2, [2147483647 x i32] zeroinitializer }> }{{.*}}
+ struct B { int n; int arr[1024 * 1024 * 1024 * 2u]; } b = {1, {2}};
+}
namespace BaseClassOffsets {
struct A { int a; };
diff --git a/clang/test/AST/ByteCode/functions.cpp b/clang/test/AST/ByteCode/functions.cpp
index b5e6f5b..363b6a5 100644
--- a/clang/test/AST/ByteCode/functions.cpp
+++ b/clang/test/AST/ByteCode/functions.cpp
@@ -5,6 +5,8 @@
// RUN: %clang_cc1 -pedantic -std=c++14 -verify=ref,both %s
// RUN: %clang_cc1 -pedantic -std=c++20 -verify=ref,both %s
+#define fold(x) (__builtin_constant_p(0) ? (x) : (x))
+
constexpr void doNothing() {}
constexpr int gimme5() {
doNothing();
@@ -654,14 +656,26 @@ namespace {
}
namespace FunctionCast {
- // When folding, we allow functions to be cast to different types. Such
- // cast functions cannot be called, even if they're constexpr.
+ // When folding, we allow functions to be cast to different types. We only
+ // allow calls if the dynamic type of the pointer matches the type of the
+ // call.
constexpr int f() { return 1; }
+ constexpr void* f2() { return nullptr; }
+ constexpr int f3(int a) { return a; }
typedef double (*DoubleFn)();
typedef int (*IntFn)();
- int a[(int)DoubleFn(f)()]; // both-error {{variable length array}} \
- // both-warning {{are a Clang extension}}
- int b[(int)IntFn(f)()]; // ok
+ typedef int* (*IntPtrFn)();
+ constexpr int test1 = (int)DoubleFn(f)(); // both-error {{constant expression}} both-note {{reinterpret_cast}}
+ // FIXME: We should print a note explaining the error.
+ constexpr int test2 = (int)fold(DoubleFn(f))(); // both-error {{constant expression}}
+ constexpr int test3 = (int)IntFn(f)(); // no-op cast
+ constexpr int test4 = fold(IntFn(DoubleFn(f)))();
+ constexpr int test5 = IntFn(fold(DoubleFn(f)))(); // both-error {{constant expression}} \
+ // both-note {{cast that performs the conversions of a reinterpret_cast is not allowed in a constant expression}}
+ // FIXME: Interpreter is less strict here.
+ constexpr int test6 = fold(IntPtrFn(f2))() == nullptr; // ref-error {{constant expression}}
+  // FIXME: The following crashes the interpreter.
+  // constexpr int test7 = fold(IntFn(f3)());
}
#if __cplusplus >= 202002L
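The rule the updated comment describes is easiest to see with the test's fold macro in hand. A condensed sketch mirroring the cases above (no new semantics):

    // A call through a casted function pointer is a constant expression
    // only when the pointer's dynamic type matches the type of the call.
    #define fold(x) (__builtin_constant_p(0) ? (x) : (x))
    constexpr int f() { return 1; }
    typedef double (*DoubleFn)();
    typedef int (*IntFn)();

    constexpr int ok1 = (int)IntFn(f)();            // no-op cast: fine
    constexpr int ok2 = fold(IntFn(DoubleFn(f)))(); // round-trip while folding: fine
    // constexpr int bad = (int)DoubleFn(f)();      // mismatched call: rejected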
diff --git a/clang/test/AST/ByteCode/intap.cpp b/clang/test/AST/ByteCode/intap.cpp
index 6888387..05ab319b 100644
--- a/clang/test/AST/ByteCode/intap.cpp
+++ b/clang/test/AST/ByteCode/intap.cpp
@@ -292,7 +292,19 @@ constexpr int shifts() { // both-error {{never produces a constant expression}}
(void)(2 << b); // ref-warning {{shift count is negative}}
return 1;
}
-#endif
+namespace UnderlyingInt128 {
+ enum F {
+ a = (__int128)-1
+ };
+
+ constexpr int foo() { // both-error {{never produces a constant expression}}
+ F f = (F)(__int128)10; // both-note 2{{integer value 10 is outside the valid range of values [-1, 0] for the enumeration type 'F'}}
+ return (int)f;
+ }
+ static_assert(foo() == 0, ""); // both-error {{not an integral constant expression}} \
+ // both-note {{in call to}}
+}
+#endif
#endif
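For context on the new UnderlyingInt128 diagnostic: an enumeration without a fixed underlying type only admits values representable in the smallest bit-field that can hold all of its enumerators, and constant evaluation rejects conversions outside that range. A sketch of the same rule with a plain int enumerator (illustrative, not from the patch):

    enum E { e = 3 };           // enumerators fit in 2 bits: valid range [0, 3]
    constexpr E ok = (E)2;      // inside [0, 3]: fine
    // constexpr E bad = (E)10; // outside [0, 3]: not a constant expression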
diff --git a/clang/test/ASTSYCL/ast-dump-sycl-kernel-entry-point.cpp b/clang/test/ASTSYCL/ast-dump-sycl-kernel-entry-point.cpp
index b112e9e..1a82bdc 100644
--- a/clang/test/ASTSYCL/ast-dump-sycl-kernel-entry-point.cpp
+++ b/clang/test/ASTSYCL/ast-dump-sycl-kernel-entry-point.cpp
@@ -28,21 +28,21 @@
// A unique kernel name type is required for each declared kernel entry point.
template<int, int=0> struct KN;
-__attribute__((sycl_kernel_entry_point(KN<1>)))
+[[clang::sycl_kernel_entry_point(KN<1>)]]
void skep1() {
}
// CHECK: |-FunctionDecl {{.*}} skep1 'void ()'
// CHECK: | `-SYCLKernelEntryPointAttr {{.*}} KN<1>
using KN2 = KN<2>;
-__attribute__((sycl_kernel_entry_point(KN2)))
+[[clang::sycl_kernel_entry_point(KN2)]]
void skep2() {
}
// CHECK: |-FunctionDecl {{.*}} skep2 'void ()'
// CHECK: | `-SYCLKernelEntryPointAttr {{.*}} KN2
template<int I> using KNT = KN<I>;
-__attribute__((sycl_kernel_entry_point(KNT<3>)))
+[[clang::sycl_kernel_entry_point(KNT<3>)]]
void skep3() {
}
// CHECK: |-FunctionDecl {{.*}} skep3 'void ()'
diff --git a/clang/test/Analysis/builtin_assume.cpp b/clang/test/Analysis/builtin_assume.cpp
index 7158306..29a96c0 100644
--- a/clang/test/Analysis/builtin_assume.cpp
+++ b/clang/test/Analysis/builtin_assume.cpp
@@ -62,3 +62,16 @@ int using_builtin_assume_has_no_sideeffects(int y) {
return y;
}
+
+template <int ...args>
+bool issue151529() {
+ // no-crash
+ [[assume((true))]];
+ // no-crash
+ [[assume(((args >= 0) && ...))]]; // expected-warning {{pack fold expression is a C++17 extension}}
+ return ((args >= 0) && ...); // expected-warning {{pack fold expression is a C++17 extension}}
+}
+
+void instantiate_issue151529() {
+ issue151529<0>();
+}
diff --git a/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
new file mode 100644
index 0000000..3643cf2
--- /dev/null
+++ b/clang/test/CIR/CodeGen/aapcs-volatile-bitfields.c
@@ -0,0 +1,73 @@
+// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -fclangir -emit-cir -fdump-record-layouts %s -o %t.cir 1> %t.cirlayout
+// RUN: FileCheck --input-file=%t.cirlayout %s --check-prefix=CIR-LAYOUT
+
+// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -emit-llvm -fdump-record-layouts %s -o %t.ll 1> %t.ogcglayout
+// RUN: FileCheck --input-file=%t.ogcglayout %s --check-prefix=OGCG-LAYOUT
+
+typedef struct {
+ unsigned int a : 9;
+ volatile unsigned int b : 1;
+ unsigned int c : 1;
+} st1;
+
+// CIR-LAYOUT: BitFields:[
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:9 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:9 size:1 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:9 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:c offset:10 size:1 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:10 volatileStorageSize:32 volatileStorageOffset:0>
+
+// OGCG-LAYOUT: BitFields:[
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:9 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:9 Size:1 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:9 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:10 Size:1 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:10 VolatileStorageSize:32 VolatileStorageOffset:0>
+
+// different base types
+typedef struct {
+ volatile short a : 3;
+ volatile int b : 13;
+ volatile long c : 5;
+} st2;
+
+// CIR-LAYOUT: BitFields:[
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:3 isSigned:1 storageSize:32 storageOffset:0 volatileOffset:0 volatileStorageSize:16 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:3 size:13 isSigned:1 storageSize:32 storageOffset:0 volatileOffset:3 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:c offset:16 size:5 isSigned:1 storageSize:32 storageOffset:0 volatileOffset:16 volatileStorageSize:64 volatileStorageOffset:0>
+
+// OGCG-LAYOUT: BitFields:[
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:3 IsSigned:1 StorageSize:32 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:16 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:3 Size:13 IsSigned:1 StorageSize:32 StorageOffset:0 VolatileOffset:3 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:16 Size:5 IsSigned:1 StorageSize:32 StorageOffset:0 VolatileOffset:16 VolatileStorageSize:64 VolatileStorageOffset:0>
+
+typedef struct {
+ volatile unsigned int a : 3;
+ unsigned int : 0; // zero-length bit-field forces the next field to an int-aligned boundary
+ volatile unsigned int b : 5;
+} st3;
+
+// CIR-LAYOUT: BitFields:[
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:8 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:0 size:5 isSigned:0 storageSize:8 storageOffset:4 volatileOffset:0 volatileStorageSize:0 volatileStorageOffset:0>
+
+// OGCG-LAYOUT: BitFields:[
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:8 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:5 IsSigned:0 StorageSize:8 StorageOffset:4 VolatileOffset:0 VolatileStorageSize:0 VolatileStorageOffset:0>
+
+typedef struct {
+ volatile unsigned int a : 3;
+ unsigned int z : 2;
+ volatile unsigned int b : 5;
+} st4;
+
+// CIR-LAYOUT: BitFields:[
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:a offset:0 size:3 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:0 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:z offset:3 size:2 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:3 volatileStorageSize:32 volatileStorageOffset:0>
+// CIR-LAYOUT-NEXT: <CIRBitFieldInfo name:b offset:5 size:5 isSigned:0 storageSize:16 storageOffset:0 volatileOffset:5 volatileStorageSize:32 volatileStorageOffset:0>
+
+// OGCG-LAYOUT: BitFields:[
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:0 Size:3 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:0 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:3 Size:2 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:3 VolatileStorageSize:32 VolatileStorageOffset:0>
+// OGCG-LAYOUT-NEXT: <CGBitFieldInfo Offset:5 Size:5 IsSigned:0 StorageSize:16 StorageOffset:0 VolatileOffset:5 VolatileStorageSize:32 VolatileStorageOffset:0>
+
+st1 s1;
+st2 s2;
+st3 s3;
+st4 s4;
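What the volatile* columns record: under the AAPCS, a volatile bit-field must be accessed with the width of its declared container type, even when the natural packed storage unit is narrower. A sketch of the observable consequence for st1 above (assuming an AArch64 target, as in the RUN lines):

    // st1's packed storage unit is 16 bits (storageSize:16), but `b` is
    // declared `volatile unsigned int`, so the AAPCS requires a 32-bit
    // load/modify/store -- which is what volatileStorageSize:32 records.
    void set_b(st1 *s) {
      s->b = 1;
    }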
diff --git a/clang/test/CIR/CodeGen/complex-mul-div.cpp b/clang/test/CIR/CodeGen/complex-mul-div.cpp
new file mode 100644
index 0000000..9d71ef7
--- /dev/null
+++ b/clang/test/CIR/CodeGen/complex-mul-div.cpp
@@ -0,0 +1,280 @@
+// complex-range basic
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=basic -Wno-unused-value -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-BASIC %s
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=basic -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=basic -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=basic -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED
+
+// complex-range improved
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=improved -Wno-unused-value -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-IMPROVED %s
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=improved -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=improved -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=improved -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED
+
+// complex-range promoted
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=promoted -Wno-unused-value -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-PROMOTED %s
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=promoted -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-INT,CIR-AFTER-MUL-COMBINED
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=promoted -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-INT,LLVM-MUL-COMBINED
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=promoted -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-INT,OGCG-MUL-COMBINED
+
+// complex-range full
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -complex-range=full -Wno-unused-value -emit-cir -mmlir --mlir-print-ir-before=cir-canonicalize -o %t.cir %s 2>&1 | FileCheck --check-prefix=CIR-BEFORE-FULL %s
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=full -Wno-unused-value -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefixes=CIR-AFTER-FULL,CIR-AFTER-INT
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=full -Wno-unused-value -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefixes=LLVM-FULL,LLVM-INT
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -complex-range=full -Wno-unused-value -emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefixes=OGCG-FULL,OGCG-INT
+
+void foo() {
+ float _Complex a;
+ float _Complex b;
+ float _Complex c = a * b;
+}
+
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(basic) : !cir.complex<!cir.float>
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(improved) : !cir.complex<!cir.float>
+
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(promoted) : !cir.complex<!cir.float>
+
+// CIR-AFTER-MUL-COMBINED: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER-MUL-COMBINED: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-MUL-COMBINED: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-MUL-COMBINED: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-MUL-COMBINED: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-MUL-COMBINED: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[C_REAL:.*]] = cir.binop(sub, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[C_IMAG:.*]] = cir.binop(add, %[[MUL_AR_BI]], %[[MUL_AI_BR]]) : !cir.float
+// CIR-AFTER-MUL-COMBINED: %[[RESULT:.*]] = cir.complex.create %[[C_REAL]], %[[C_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-MUL-COMBINED: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-MUL-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-MUL-COMBINED: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-MUL-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-MUL-COMBINED: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-MUL-COMBINED: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-MUL-COMBINED: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-MUL-COMBINED: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-MUL-COMBINED: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-MUL-COMBINED: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-MUL-COMBINED: %[[MUL_AR_BR:.*]] = fmul float %[[A_REAL]], %[[B_REAL]]
+// LLVM-MUL-COMBINED: %[[MUL_AI_BI:.*]] = fmul float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM-MUL-COMBINED: %[[MUL_AR_BI:.*]] = fmul float %[[A_REAL]], %[[B_IMAG]]
+// LLVM-MUL-COMBINED: %[[MUL_AI_BR:.*]] = fmul float %[[A_IMAG]], %[[B_REAL]]
+// LLVM-MUL-COMBINED: %[[C_REAL:.*]] = fsub float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-MUL-COMBINED: %[[C_IMAG:.*]] = fadd float %[[MUL_AR_BI]], %[[MUL_AI_BR]]
+// LLVM-MUL-COMBINED: %[[MUL_A_B:.*]] = insertvalue { float, float } {{.*}}, float %[[C_REAL]], 0
+// LLVM-MUL-COMBINED: %[[RESULT:.*]] = insertvalue { float, float } %[[MUL_A_B]], float %[[C_IMAG]], 1
+// LLVM-MUL-COMBINED: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-MUL-COMBINED: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-MUL-COMBINED: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-MUL-COMBINED: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-MUL-COMBINED: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-MUL-COMBINED: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-MUL-COMBINED: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-MUL-COMBINED: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-MUL-COMBINED: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-MUL-COMBINED: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-MUL-COMBINED: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-MUL-COMBINED: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-MUL-COMBINED: %[[MUL_AR_BR:.*]] = fmul float %[[A_REAL]], %[[B_REAL]]
+// OGCG-MUL-COMBINED: %[[MUL_AI_BI:.*]] = fmul float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG-MUL-COMBINED: %[[MUL_AR_BI:.*]] = fmul float %[[A_REAL]], %[[B_IMAG]]
+// OGCG-MUL-COMBINED: %[[MUL_AI_BR:.*]] = fmul float %[[A_IMAG]], %[[B_REAL]]
+// OGCG-MUL-COMBINED: %[[C_REAL:.*]] = fsub float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-MUL-COMBINED: %[[C_IMAG:.*]] = fadd float %[[MUL_AR_BI]], %[[MUL_AI_BR]]
+// OGCG-MUL-COMBINED: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-MUL-COMBINED: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-MUL-COMBINED: store float %[[C_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-MUL-COMBINED: store float %[[C_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(full) : !cir.complex<!cir.float>
+
+// CIR-AFTER-FULL: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER-FULL: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR-AFTER-FULL: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c", init]
+// CIR-AFTER-FULL: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER-FULL: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-FULL: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-FULL: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !cir.float
+// CIR-AFTER-FULL: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !cir.float
+// CIR-AFTER-FULL: %[[C_REAL:.*]] = cir.binop(sub, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !cir.float
+// CIR-AFTER-FULL: %[[C_IMAG:.*]] = cir.binop(add, %[[MUL_AR_BI]], %[[MUL_AI_BR]]) : !cir.float
+// CIR-AFTER-FULL: %[[COMPLEX:.*]] = cir.complex.create %[[C_REAL]], %[[C_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: %[[IS_C_REAL_NAN:.*]] = cir.cmp(ne, %[[C_REAL]], %[[C_REAL]]) : !cir.float, !cir.bool
+// CIR-AFTER-FULL: %[[IS_C_IMAG_NAN:.*]] = cir.cmp(ne, %[[C_IMAG]], %[[C_IMAG]]) : !cir.float, !cir.bool
+// CIR-AFTER-FULL: %[[CONST_FALSE:.*]] = cir.const #false
+// CIR-AFTER-FULL: %[[SELECT_CONDITION:.*]] = cir.select if %[[IS_C_REAL_NAN]] then %[[IS_C_IMAG_NAN]] else %[[CONST_FALSE]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
+// CIR-AFTER-FULL: %[[RESULT:.*]] = cir.ternary(%[[SELECT_CONDITION]], true {
+// CIR-AFTER-FULL: %[[LIBC_COMPLEX:.*]] = cir.call @__mulsc3(%[[A_REAL]], %[[A_IMAG]], %[[B_REAL]], %[[B_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: cir.yield %[[LIBC_COMPLEX]] : !cir.complex<!cir.float>
+// CIR-AFTER-FULL: }, false {
+// CIR-AFTER-FULL: cir.yield %[[COMPLEX]] : !cir.complex<!cir.float>
+// CIR-AFTER-FULL: }) : (!cir.bool) -> !cir.complex<!cir.float>
+// CIR-AFTER-FULL: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM-FULL: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM-FULL: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM-FULL: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM-FULL: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM-FULL: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM-FULL: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM-FULL: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM-FULL: %[[MUL_AR_BR:.*]] = fmul float %[[A_REAL]], %[[B_REAL]]
+// LLVM-FULL: %[[MUL_AI_BI:.*]] = fmul float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM-FULL: %[[MUL_AR_BI:.*]] = fmul float %[[A_REAL]], %[[B_IMAG]]
+// LLVM-FULL: %[[MUL_AI_BR:.*]] = fmul float %[[A_IMAG]], %[[B_REAL]]
+// LLVM-FULL: %[[C_REAL:.*]] = fsub float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-FULL: %[[C_IMAG:.*]] = fadd float %[[MUL_AR_BI]], %[[MUL_AI_BR]]
+// LLVM-FULL: %[[MUL_A_B:.*]] = insertvalue { float, float } {{.*}}, float %[[C_REAL]], 0
+// LLVM-FULL: %[[COMPLEX:.*]] = insertvalue { float, float } %[[MUL_A_B]], float %[[C_IMAG]], 1
+// LLVM-FULL: %[[IS_C_REAL_NAN:.*]] = fcmp une float %[[C_REAL]], %[[C_REAL]]
+// LLVM-FULL: %[[IS_C_IMAG_NAN:.*]] = fcmp une float %[[C_IMAG]], %[[C_IMAG]]
+// LLVM-FULL: %[[SELECT_CONDITION:.*]] = and i1 %[[IS_C_REAL_NAN]], %[[IS_C_IMAG_NAN]]
+// LLVM-FULL: br i1 %[[SELECT_CONDITION]], label %[[THEN_LABEL:.*]], label %[[ELSE_LABEL:.*]]
+// LLVM-FULL: [[THEN_LABEL]]:
+// LLVM-FULL: %[[LIBC_COMPLEX:.*]] = call { float, float } @__mulsc3(float %[[A_REAL]], float %[[A_IMAG]], float %[[B_REAL]], float %[[B_IMAG]])
+// LLVM-FULL: br label %[[PHI_BRANCH:.*]]
+// LLVM-FULL: [[ELSE_LABEL]]:
+// LLVM-FULL:   br label %[[PHI_BRANCH]]
+// LLVM-FULL: [[PHI_BRANCH]]:
+// LLVM-FULL: %[[RESULT:.*]] = phi { float, float } [ %[[COMPLEX]], %[[ELSE_LABEL]] ], [ %[[LIBC_COMPLEX]], %[[THEN_LABEL]] ]
+// LLVM-FULL: br label %[[END_LABEL:.*]]
+// LLVM-FULL: [[END_LABEL]]:
+// LLVM-FULL: store { float, float } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-FULL: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[C_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[COMPLEX_CALL_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG-FULL: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG-FULL: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-FULL: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG-FULL: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-FULL: %[[MUL_AR_BR:.*]] = fmul float %[[A_REAL]], %[[B_REAL]]
+// OGCG-FULL: %[[MUL_AI_BI:.*]] = fmul float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG-FULL: %[[MUL_AR_BI:.*]] = fmul float %[[A_REAL]], %[[B_IMAG]]
+// OGCG-FULL: %[[MUL_AI_BR:.*]] = fmul float %[[A_IMAG]], %[[B_REAL]]
+// OGCG-FULL: %[[C_REAL:.*]] = fsub float %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-FULL: %[[C_IMAG:.*]] = fadd float %[[MUL_AR_BI]], %[[MUL_AI_BR]]
+// OGCG-FULL: %[[IS_C_REAL_NAN:.*]] = fcmp uno float %[[C_REAL]], %[[C_REAL]]
+// OGCG-FULL: br i1 %[[IS_C_REAL_NAN]], label %[[COMPLEX_IS_IMAG_NAN:.*]], label %[[END_LABEL:.*]], !prof !2
+// OGCG-FULL: [[COMPLEX_IS_IMAG_NAN]]:
+// OGCG-FULL: %[[IS_C_IMAG_NAN:.*]] = fcmp uno float %[[C_IMAG]], %[[C_IMAG]]
+// OGCG-FULL: br i1 %[[IS_C_IMAG_NAN]], label %[[COMPLEX_LIB_CALL:.*]], label %[[END_LABEL]], !prof !2
+// OGCG-FULL: [[COMPLEX_LIB_CALL]]:
+// OGCG-FULL: %[[CALL_RESULT:.*]] = call noundef <2 x float> @__mulsc3(float noundef %[[A_REAL]], float noundef %[[A_IMAG]], float noundef %[[B_REAL]], float noundef %[[B_IMAG]])
+// OGCG-FULL: store <2 x float> %[[CALL_RESULT]], ptr %[[COMPLEX_CALL_ADDR]], align 4
+// OGCG-FULL: %[[COMPLEX_CALL_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[COMPLEX_CALL_REAL:.*]] = load float, ptr %[[COMPLEX_CALL_REAL_PTR]], align 4
+// OGCG-FULL: %[[COMPLEX_CALL_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 1
+// OGCG-FULL: %[[COMPLEX_CALL_IMAG:.*]] = load float, ptr %[[COMPLEX_CALL_IMAG_PTR]], align 4
+// OGCG-FULL: br label %[[END_LABEL]]
+// OGCG-FULL: [[END_LABEL]]:
+// OGCG-FULL: %[[FINAL_REAL:.*]] = phi float [ %[[C_REAL]], %[[ENTRY:.*]] ], [ %[[C_REAL]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_REAL]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG-FULL: %[[FINAL_IMAG:.*]] = phi float [ %[[C_IMAG]], %[[ENTRY]] ], [ %[[C_IMAG]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_IMAG]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG-FULL: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-FULL: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-FULL: store float %[[FINAL_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-FULL: store float %[[FINAL_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+void foo1() {
+ int _Complex a;
+ int _Complex b;
+ int _Complex c = a * b;
+}
+
+// CIR-BEFORE-BASIC: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(basic) : !cir.complex<!s32i>
+
+// CIR-BEFORE-IMPROVED: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(improved) : !cir.complex<!s32i>
+
+// CIR-BEFORE-PROMOTED: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(promoted) : !cir.complex<!s32i>
+
+// CIR-BEFORE-FULL: %{{.*}} = cir.complex.mul {{.*}}, {{.*}} range(full) : !cir.complex<!s32i>
+
+// CIR-AFTER-INT: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR-AFTER-INT: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR-AFTER-INT: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c", init]
+// CIR-AFTER-INT: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR-AFTER-INT: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR-AFTER-INT: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR-AFTER-INT: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR-AFTER-INT: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR-AFTER-INT: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR-AFTER-INT: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !s32i
+// CIR-AFTER-INT: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR-AFTER-INT: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !s32i
+// CIR-AFTER-INT: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !s32i
+// CIR-AFTER-INT: %[[C_REAL:.*]] = cir.binop(sub, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !s32i
+// CIR-AFTER-INT: %[[C_IMAG:.*]] = cir.binop(add, %[[MUL_AR_BI]], %[[MUL_AI_BR]]) : !s32i
+// CIR-AFTER-INT: %[[RESULT:.*]] = cir.complex.create %[[C_REAL]], %[[C_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR-AFTER-INT: cir.store{{.*}} %[[RESULT]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM-INT: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-INT: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-INT: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM-INT: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM-INT: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// LLVM-INT: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM-INT: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM-INT: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM-INT: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM-INT: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[B_REAL]]
+// LLVM-INT: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], %[[B_IMAG]]
+// LLVM-INT: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], %[[B_IMAG]]
+// LLVM-INT: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[B_REAL]]
+// LLVM-INT: %[[C_REAL:.*]] = sub i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM-INT: %[[C_IMAG:.*]] = add i32 %[[MUL_AR_BI]], %[[MUL_AI_BR]]
+// LLVM-INT: %[[MUL_A_B:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[C_REAL]], 0
+// LLVM-INT: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[MUL_A_B]], i32 %[[C_IMAG]], 1
+// LLVM-INT: store { i32, i32 } %[[RESULT]], ptr %[[C_ADDR]], align 4
+
+// OGCG-INT: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-INT: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-INT: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG-INT: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG-INT: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG-INT: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG-INT: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG-INT: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG-INT: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG-INT: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG-INT: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG-INT: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[B_REAL]]
+// OGCG-INT: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], %[[B_IMAG]]
+// OGCG-INT: %[[C_REAL:.*]] = sub i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG-INT: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[B_REAL]]
+// OGCG-INT: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], %[[B_IMAG]]
+// OGCG-INT: %[[C_IMAG:.*]] = add i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG-INT: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG-INT: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG-INT: store i32 %[[C_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG-INT: store i32 %[[C_IMAG]], ptr %[[C_IMAG_PTR]], align 4
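The FULL prefixes above all encode the same strategy. A hand-written sketch of that lowering (using clang's __builtin_complex extension; the real helper __mulsc3 lives in compiler-rt):

    // -complex-range=full for float _Complex `a * b`: do the naive multiply,
    // and only when both components come out NaN (possibly spurious, e.g.
    // from infinite operands) fall back to the runtime helper, which handles
    // the inf/NaN special cases.
    extern "C" float _Complex __mulsc3(float, float, float, float);

    float _Complex mul_full(float ar, float ai, float br, float bi) {
      float cr = ar * br - ai * bi;
      float ci = ar * bi + ai * br;
      if (cr != cr && ci != ci)           // NaN is the only value != itself
        return __mulsc3(ar, ai, br, bi);
      return __builtin_complex(cr, ci);   // clang builtin to pack a complex
    }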
diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp
index 676b554..4cd81eb 100644
--- a/clang/test/CIR/CodeGen/complex-unary.cpp
+++ b/clang/test/CIR/CodeGen/complex-unary.cpp
@@ -284,3 +284,89 @@ void foo6() {
// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
// OGCG: store float %[[A_REAL_DEC]], ptr %[[RESULT_REAL_PTR]], align 4
// OGCG: store float %[[A_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4
+
+void foo7() {
+ float _Complex a;
+ float _Complex b = +a;
+}
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_PLUS:.*]] = cir.unary(plus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_PLUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[REAL_PLUS:.*]] = cir.unary(plus, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[IMAG_PLUS:.*]] = cir.unary(plus, %[[IMAG]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_PLUS]], %[[IMAG_PLUS]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[REAL:.*]] = extractvalue { float, float } %[[TMP]], 0
+// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %[[TMP]], 1
+// LLVM: %[[RESULT_TMP:.*]] = insertvalue { float, float } {{.*}}, float %[[REAL]], 0
+// LLVM: %[[RESULT_VAL:.*]] = insertvalue { float, float } %[[RESULT_TMP]], float %[[IMAG]], 1
+// LLVM: store { float, float } %[[RESULT_VAL]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[A_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo8() {
+ float _Complex a;
+ float _Complex b = -a;
+}
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_MINUS:.*]] = cir.unary(minus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_MINUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[REAL_MINUS:.*]] = cir.unary(minus, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.unary(minus, %[[IMAG]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_MINUS]], %[[IMAG_MINUS]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[REAL:.*]] = extractvalue { float, float } %[[TMP]], 0
+// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %[[TMP]], 1
+// LLVM: %[[REAL_MINUS:.*]] = fneg float %[[REAL]]
+// LLVM: %[[IMAG_MINUS:.*]] = fneg float %[[IMAG]]
+// LLVM: %[[RESULT_TMP:.*]] = insertvalue { float, float } {{.*}}, float %[[REAL_MINUS]], 0
+// LLVM: %[[RESULT_VAL:.*]] = insertvalue { float, float } %[[RESULT_TMP]], float %[[IMAG_MINUS]], 1
+// LLVM: store { float, float } %[[RESULT_VAL]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_MINUS:.*]] = fneg float %[[A_REAL]]
+// OGCG: %[[A_IMAG_MINUS:.*]] = fneg float %[[A_IMAG]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[A_REAL_MINUS]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG_MINUS]], ptr %[[B_IMAG_PTR]], align 4
diff --git a/clang/test/CIR/CodeGen/empty.cpp b/clang/test/CIR/CodeGen/empty.cpp
new file mode 100644
index 0000000..378ae21
--- /dev/null
+++ b/clang/test/CIR/CodeGen/empty.cpp
@@ -0,0 +1,32 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR
+
+// These declarations shouldn't emit any code. Therefore the module is expected to be empty.
+
+template<typename T>
+concept some_concept = true;
+
+template<some_concept T>
+class class_template {};
+
+; // Empty declaration
+
+template<typename T>
+void function_template();
+
+static_assert(true, "top level static assert");
+
+template<typename T>
+using type_alias = T;
+
+namespace N {
+ using ::class_template; // UsingShadow
+}
+
+template<typename T>
+struct deduction_guide {};
+
+deduction_guide() -> deduction_guide<int>;
+
+// CIR: module {{.*}} {
+// CIR-NEXT: }
diff --git a/clang/test/CIR/CodeGen/variable-decomposition.cpp b/clang/test/CIR/CodeGen/variable-decomposition.cpp
new file mode 100644
index 0000000..022d06a
--- /dev/null
+++ b/clang/test/CIR/CodeGen/variable-decomposition.cpp
@@ -0,0 +1,55 @@
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-pc-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-pc-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -std=c++17 -triple x86_64-pc-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+struct some_struct {
+ int a;
+ float b;
+};
+
+float function() {
+  auto [a, b] = some_struct{1, 2.f};
+
+ return a + b;
+}
+
+// CIR-LABEL: cir.func dso_local @_Z8functionv() -> !cir.float
+// CIR: %[[RETVAL:.+]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["__retval"]
+// CIR: %[[STRUCT:.+]] = cir.alloca !rec_some_struct, !cir.ptr<!rec_some_struct>, [""]
+// CIR: %[[MEMBER_A:.+]] = cir.get_member %[[STRUCT]][0] {name = "a"} : !cir.ptr<!rec_some_struct> -> !cir.ptr<!s32i>
+// CIR: %[[LOAD_A:.+]] = cir.load align(4) %[[MEMBER_A]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[CAST_A:.+]] = cir.cast(int_to_float, %[[LOAD_A]] : !s32i), !cir.float
+// CIR: %[[MEMBER_B:.+]] = cir.get_member %[[STRUCT]][1] {name = "b"} : !cir.ptr<!rec_some_struct> -> !cir.ptr<!cir.float>
+// CIR: %[[LOAD_B:.+]] = cir.load align(4) %[[MEMBER_B]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: %[[ADD:.+]] = cir.binop(add, %[[CAST_A]], %[[LOAD_B]]) : !cir.float
+// CIR: cir.store %[[ADD]], %[[RETVAL]] : !cir.float, !cir.ptr<!cir.float>
+// CIR: %[[RET:.+]] = cir.load %[[RETVAL]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: cir.return %[[RET]] : !cir.float
+
+// LLVM-LABEL: define dso_local float @_Z8functionv()
+// LLVM: %[[RETVAL:.+]] = alloca float, i64 1
+// LLVM: %[[STRUCT:.+]] = alloca %struct.some_struct, i64 1
+// LLVM: %[[GEP_A:.+]] = getelementptr %struct.some_struct, ptr %[[STRUCT]], i32 0, i32 0
+// LLVM: %[[LOAD_A:.+]] = load i32, ptr %[[GEP_A]]
+// LLVM: %[[CAST_A:.+]] = sitofp i32 %[[LOAD_A]] to float
+// LLVM: %[[GEP_B:.+]] = getelementptr %struct.some_struct, ptr %[[STRUCT]], i32 0, i32 1
+// LLVM: %[[LOAD_B:.+]] = load float, ptr %[[GEP_B]]
+// LLVM: %[[ADD:.+]] = fadd float %[[CAST_A]], %[[LOAD_B]]
+// LLVM: store float %[[ADD]], ptr %[[RETVAL]]
+// LLVM: %[[RET:.+]] = load float, ptr %[[RETVAL]]
+// LLVM: ret float %[[RET]]
+
+// OGCG: @__const._Z8functionv.{{.*}} = private unnamed_addr constant %struct.some_struct { i32 1, float 2.000000e+00 }
+// OGCG-LABEL: define dso_local noundef float @_Z8functionv()
+// OGCG: %[[STRUCT:.+]] = alloca %struct.some_struct
+// OGCG: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[STRUCT]], ptr align 4 @__const._Z8functionv.{{.*}}, i64 8, i1 false)
+// OGCG: %[[GEP_A:.+]] = getelementptr inbounds nuw %struct.some_struct, ptr %[[STRUCT]], i32 0, i32 0
+// OGCG: %[[LOAD_A:.+]] = load i32, ptr %[[GEP_A]]
+// OGCG: %[[CAST_A:.+]] = sitofp i32 %[[LOAD_A]] to float
+// OGCG: %[[GEP_B:.+]] = getelementptr inbounds nuw %struct.some_struct, ptr %[[STRUCT]], i32 0, i32 1
+// OGCG: %[[LOAD_B:.+]] = load float, ptr %[[GEP_B]]
+// OGCG: %[[ADD:.+]] = fadd float %[[CAST_A]], %[[LOAD_B]]
+// OGCG: ret float %[[ADD]]
diff --git a/clang/test/CXX/expr/expr.const/p2-0x.cpp b/clang/test/CXX/expr/expr.const/p2-0x.cpp
index 910c863..8401d30 100644
--- a/clang/test/CXX/expr/expr.const/p2-0x.cpp
+++ b/clang/test/CXX/expr/expr.const/p2-0x.cpp
@@ -438,6 +438,11 @@ namespace ReinterpretCast {
struct U {
int m : (long)(S*)6; // expected-warning {{constant expression}} expected-note {{reinterpret_cast}}
};
+ void f();
+ constexpr void* fp1 = (void*)f; // expected-error {{constant expression}} expected-note {{reinterpret_cast}}
+ constexpr int* fp2 = (int*)f; // expected-error {{constant expression}} expected-note {{reinterpret_cast}}
+ constexpr int (*fp3)() = (int(*)())f; // expected-error {{constant expression}} expected-note {{reinterpret_cast}}
+ constexpr int (&fp4)() = (int(&)())f; // expected-error {{constant expression}} expected-note {{reinterpret_cast}}
}
// - a pseudo-destructor call (5.2.4);
diff --git a/clang/test/CodeCompletion/skip-explicit-object-parameter.cpp b/clang/test/CodeCompletion/skip-explicit-object-parameter.cpp
index 55c16bb..587d6cb 100644
--- a/clang/test/CodeCompletion/skip-explicit-object-parameter.cpp
+++ b/clang/test/CodeCompletion/skip-explicit-object-parameter.cpp
@@ -1,14 +1,48 @@
struct A {
- void foo(this A self, int arg);
+ void foo(this auto&& self, int arg);
+ void bar(this A self, int arg);
};
-int main() {
+int func1() {
A a {};
a.
}
-// RUN: %clang_cc1 -cc1 -fsyntax-only -code-completion-at=%s:%(line-2):5 -std=c++23 %s | FileCheck %s
-// CHECK: COMPLETION: A : A::
-// CHECK-NEXT: COMPLETION: foo : [#void#]foo(<#int arg#>)
-// CHECK-NEXT: COMPLETION: operator= : [#A &#]operator=(<#const A &#>)
-// CHECK-NEXT: COMPLETION: operator= : [#A &#]operator=(<#A &&#>)
-// CHECK-NEXT: COMPLETION: ~A : [#void#]~A()
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):5 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC1 %s
+// CHECK-CC1: COMPLETION: A : A::
+// CHECK-CC1-NEXT: COMPLETION: bar : [#void#]bar(<#int arg#>)
+// CHECK-CC1-NEXT: COMPLETION: foo : [#void#]foo(<#int arg#>)
+// CHECK-CC1-NEXT: COMPLETION: operator= : [#A &#]operator=(<#const A &#>)
+// CHECK-CC1-NEXT: COMPLETION: operator= : [#A &#]operator=(<#A &&#>)
+// CHECK-CC1-NEXT: COMPLETION: ~A : [#void#]~A()
+
+struct B {
+ template <typename T>
+ void foo(this T&& self, int arg);
+};
+
+int func2() {
+ B b {};
+ b.foo();
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):9 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC2 %s
+// CHECK-CC2: OVERLOAD: [#void#]foo(int arg)
+
+// TODO: llvm/llvm-project/146649
+// This is incorrect behavior. Correct Result should be a variant of,
+// CC3: should be something like [#void#]foo(<#A self#>, <#int arg#>)
+// CC4: should be something like [#void#]bar(<#A self#>, <#int arg#>)
+int func3() {
+ (&A::foo)
+ (&A::bar)
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-3):10 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC3 %s
+// CHECK-CC3: COMPLETION: foo : [#void#]foo<<#class self:auto#>>(<#int arg#>)
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-4):10 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC4 %s
+// CHECK-CC4: COMPLETION: bar : [#void#]bar(<#int arg#>)
+
+int func4() {
+ // TODO (&A::foo)(
+ (&A::bar)(
+}
+// RUN: %clang_cc1 -fsyntax-only -code-completion-at=%s:%(line-2):13 -std=c++23 %s | FileCheck -check-prefix=CHECK-CC5 %s
+// CHECK-CC5: OVERLOAD: [#void#](<#A#>, int)
diff --git a/clang/test/CodeGen/attr-counted-by-for-pointers.c b/clang/test/CodeGen/attr-counted-by-for-pointers.c
index 2407654..e939e49 100644
--- a/clang/test/CodeGen/attr-counted-by-for-pointers.c
+++ b/clang/test/CodeGen/attr-counted-by-for-pointers.c
@@ -471,3 +471,80 @@ size_t test9(struct annotated_sized_ptr *p, int index) {
size_t test10(struct annotated_sized_ptr *p, int index) {
return __bdos(&((unsigned int *) p->buf)[index]);
}
+
+struct pr151236_struct {
+ int *a __counted_by(a_count);
+ short a_count;
+};
+
+// SANITIZE-WITH-ATTR-LABEL: define dso_local range(i64 -262144, 262137) i64 @test11(
+// SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SANITIZE-WITH-ATTR-NEXT: entry:
+// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
+// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i16, ptr [[COUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp sgt i16 [[COUNTED_BY_LOAD]], -1
+// SANITIZE-WITH-ATTR-NEXT: [[COUNT:%.*]] = sext i16 [[COUNTED_BY_LOAD]] to i64
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAY_SIZE:%.*]] = shl nsw i64 [[COUNT]], 3
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = select i1 [[TMP0]], i64 [[ARRAY_SIZE]], i64 0
+// SANITIZE-WITH-ATTR-NEXT: ret i64 [[TMP1]]
+//
+// NO-SANITIZE-WITH-ATTR-LABEL: define dso_local range(i64 -262144, 262137) i64 @test11(
+// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef readonly captures(none) [[P:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// NO-SANITIZE-WITH-ATTR-NEXT: entry:
+// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
+// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i16, ptr [[COUNTED_BY_GEP]], align 4
+// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNT:%.*]] = sext i16 [[COUNTED_BY_LOAD]] to i64
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY_SIZE:%.*]] = shl nsw i64 [[COUNT]], 3
+// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp sgt i16 [[COUNTED_BY_LOAD]], -1
+// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = select i1 [[TMP0]], i64 [[ARRAY_SIZE]], i64 0
+// NO-SANITIZE-WITH-ATTR-NEXT: ret i64 [[TMP1]]
+//
+// SANITIZE-WITHOUT-ATTR-LABEL: define dso_local range(i64 0, -1) i64 @test11(
+// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SANITIZE-WITHOUT-ATTR-NEXT: entry:
+// SANITIZE-WITHOUT-ATTR-NEXT: ret i64 -2
+//
+// NO-SANITIZE-WITHOUT-ATTR-LABEL: define dso_local range(i64 0, -1) i64 @test11(
+// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef readonly captures(none) [[P:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i64 -2
+//
+size_t test11(struct pr151236_struct *p) {
+ return __bdos(p->a) + __bdos((p->a));
+}
+
+// SANITIZE-WITH-ATTR-LABEL: define dso_local range(i64 -262144, 262137) i64 @test12(
+// SANITIZE-WITH-ATTR-SAME: ptr noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SANITIZE-WITH-ATTR-NEXT: entry:
+// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
+// SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i16, ptr [[COUNTED_BY_GEP]], align 4
+// SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp sgt i16 [[COUNTED_BY_LOAD]], -1
+// SANITIZE-WITH-ATTR-NEXT: [[COUNT:%.*]] = sext i16 [[COUNTED_BY_LOAD]] to i64
+// SANITIZE-WITH-ATTR-NEXT: [[ARRAY_SIZE:%.*]] = shl nsw i64 [[COUNT]], 3
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = select i1 [[TMP0]], i64 [[ARRAY_SIZE]], i64 0
+// SANITIZE-WITH-ATTR-NEXT: ret i64 [[TMP1]]
+//
+// NO-SANITIZE-WITH-ATTR-LABEL: define dso_local range(i64 -262144, 262137) i64 @test12(
+// NO-SANITIZE-WITH-ATTR-SAME: ptr noundef readonly captures(none) [[P:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// NO-SANITIZE-WITH-ATTR-NEXT: entry:
+// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_GEP:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 8
+// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNTED_BY_LOAD:%.*]] = load i16, ptr [[COUNTED_BY_GEP]], align 4
+// NO-SANITIZE-WITH-ATTR-NEXT: [[COUNT:%.*]] = sext i16 [[COUNTED_BY_LOAD]] to i64
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAY_SIZE:%.*]] = shl nsw i64 [[COUNT]], 3
+// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = icmp sgt i16 [[COUNTED_BY_LOAD]], -1
+// NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = select i1 [[TMP0]], i64 [[ARRAY_SIZE]], i64 0
+// NO-SANITIZE-WITH-ATTR-NEXT: ret i64 [[TMP1]]
+//
+// SANITIZE-WITHOUT-ATTR-LABEL: define dso_local range(i64 0, -1) i64 @test12(
+// SANITIZE-WITHOUT-ATTR-SAME: ptr noundef [[P:%.*]]) local_unnamed_addr #[[ATTR0]] {
+// SANITIZE-WITHOUT-ATTR-NEXT: entry:
+// SANITIZE-WITHOUT-ATTR-NEXT: ret i64 -2
+//
+// NO-SANITIZE-WITHOUT-ATTR-LABEL: define dso_local range(i64 0, -1) i64 @test12(
+// NO-SANITIZE-WITHOUT-ATTR-SAME: ptr noundef readonly captures(none) [[P:%.*]]) local_unnamed_addr #[[ATTR1]] {
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: entry:
+// NO-SANITIZE-WITHOUT-ATTR-NEXT: ret i64 -2
+//
+size_t test12(struct pr151236_struct *p) {
+ return __bdos(p->a) + __bdos(((int *)p->a));
+}
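For orientation, the construct these tests exercise, as a sketch in plain C (`__bdos` and `__counted_by` are the test file's shorthands for __builtin_dynamic_object_size and the counted_by attribute; pointer members with counted_by may require a recent clang and, depending on version, extra experimental flags):

    // With counted_by, the dynamic object size of p->a is derived from the
    // current value of the count field (count * sizeof(int) here), clamped
    // to 0 for negative counts -- hence the icmp/select pair in the checks.
    struct S {
      int *a __attribute__((counted_by(n)));
      short n;
    };

    unsigned long size_of_a(struct S *p) {
      return __builtin_dynamic_object_size(p->a, 0);
    }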
diff --git a/clang/test/CodeGen/dbg-info-all-calls-described.c b/clang/test/CodeGen/dbg-info-all-calls-described.c
new file mode 100644
index 0000000..3ca3aaa
--- /dev/null
+++ b/clang/test/CodeGen/dbg-info-all-calls-described.c
@@ -0,0 +1,88 @@
+// Test that call site debug info is (un)supported in various configurations.
+
+// Supported: DWARF5, -O1, standalone DI
+// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - \
+// RUN: -O1 -disable-llvm-passes \
+// RUN: -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: | FileCheck %s -check-prefix=HAS-ATTR \
+// RUN: -implicit-check-not=DISubprogram -implicit-check-not=DIFlagAllCallsDescribed
+
+// Supported: DWARF4 + LLDB tuning, -O1, limited DI
+// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - \
+// RUN: -O1 -disable-llvm-passes \
+// RUN: -debugger-tuning=lldb \
+// RUN: -debug-info-kind=standalone -dwarf-version=4 \
+// RUN: | FileCheck %s -check-prefix=HAS-ATTR \
+// RUN: -implicit-check-not=DISubprogram -implicit-check-not=DIFlagAllCallsDescribed
+
+// Note: DIFlagAllCallsDescribed may have been enabled prematurely when tuning
+// for GDB under -gdwarf-4 in https://reviews.llvm.org/D69743. It's possible
+// this should have been 'Unsupported' until entry values emission was enabled
+// by default.
+//
+// Supported: DWARF4 + GDB tuning
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-linux-gnu \
+// RUN: %s -o - -O1 -disable-llvm-passes -debugger-tuning=gdb \
+// RUN: -debug-info-kind=standalone -dwarf-version=4 \
+// RUN: | FileCheck %s -check-prefix=HAS-ATTR \
+// RUN: -implicit-check-not=DIFlagAllCallsDescribed
+
+// Supported: DWARF4 + LLDB, -O1
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-linux-gnu \
+// RUN: %s -o - -O1 -disable-llvm-passes -debugger-tuning=lldb \
+// RUN: -debug-info-kind=standalone -dwarf-version=4 \
+// RUN: | FileCheck %s -check-prefix=HAS-ATTR \
+// RUN: -implicit-check-not=DIFlagAllCallsDescribed
+
+// Unsupported: -O0
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-linux-gnu \
+// RUN: %s -o - -O0 -disable-llvm-passes -debugger-tuning=gdb \
+// RUN: -debug-info-kind=standalone -dwarf-version=4 \
+// RUN: | FileCheck %s -check-prefix=NO-ATTR
+
+// Supported: DWARF4 + LLDB tuning, -O1, line-tables only DI
+// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - \
+// RUN: -O1 -disable-llvm-passes \
+// RUN: -debugger-tuning=lldb \
+// RUN: -debug-info-kind=line-tables-only -dwarf-version=4 \
+// RUN: | FileCheck %s -check-prefix=LINE-TABLES-ONLY
+
+// Unsupported: -O0
+// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - \
+// RUN: -O0 \
+// RUN: -debug-info-kind=standalone -dwarf-version=5 \
+// RUN: | FileCheck %s -check-prefix=NO-ATTR
+
+// Unsupported: DWARF4
+// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - \
+// RUN: -O1 -disable-llvm-passes \
+// RUN: -debug-info-kind=standalone -dwarf-version=4 \
+// RUN: | FileCheck %s -check-prefix=NO-ATTR
+
+// NO-ATTR-NOT: FlagAllCallsDescribed
+
+// HAS-ATTR-DAG: DISubprogram(name: "declaration1", {{.*}}, spFlags: DISPFlagOptimized)
+// HAS-ATTR-DAG: DISubprogram(name: "declaration2", {{.*}}, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized
+// HAS-ATTR-DAG: DISubprogram(name: "declaration3", {{.*}}, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+// HAS-ATTR-DAG: DISubprogram(name: "declaration4", {{.*}}, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized
+
+// HAS-ATTR-DAG: DISubprogram(name: "force_irgen", {{.*}}, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition
+
+// LINE-TABLES-ONLY: DISubprogram(name: "force_irgen", {{.*}}, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition
+
+void declaration1();
+
+void declaration2();
+
+void declaration2() {}
+
+void declaration3(void);
+
+void declaration4(void);
+
+void declaration4(void) {}
+
+void __attribute__((optnone)) force_irgen(void) {
+ declaration1();
+ declaration3();
+}
diff --git a/clang/test/CodeGen/debug-info-abspath.c b/clang/test/CodeGen/debug-info-abspath.c
index b2047a7..193a72c 100644
--- a/clang/test/CodeGen/debug-info-abspath.c
+++ b/clang/test/CodeGen/debug-info-abspath.c
@@ -2,20 +2,15 @@
// RUN: cp %s %t/UNIQUEISH_SENTINEL/debug-info-abspath.c
// RUN: %clang_cc1 -debug-info-kind=limited -triple %itanium_abi_triple \
+// RUN: -fdebug-compilation-dir=%t/UNIQUEISH_SENTINEL/debug-info-abspath.c \
// RUN: %t/UNIQUEISH_SENTINEL/debug-info-abspath.c -emit-llvm -o - \
// RUN: | FileCheck %s
// RUN: cp %s %t.c
// RUN: %clang_cc1 -debug-info-kind=limited -triple %itanium_abi_triple \
+// RUN: -fdebug-compilation-dir=%t \
// RUN: %t.c -emit-llvm -o - | FileCheck %s --check-prefix=INTREE
-// RUN: cd %t/UNIQUEISH_SENTINEL
-// RUN: %clang_cc1 -debug-info-kind=limited -triple %itanium_abi_triple \
-// RUN: debug-info-abspath.c -emit-llvm -o - \
-// RUN: | FileCheck %s --check-prefix=CURDIR
-// RUN: %clang_cc1 -debug-info-kind=limited -triple %itanium_abi_triple \
-// RUN: %s -emit-llvm -o - | FileCheck %s --check-prefix=CURDIR
-
void foo(void) {}
// Since %s is an absolute path, directory should be the common
@@ -28,7 +23,3 @@ void foo(void) {}
// INTREE: = distinct !DISubprogram({{.*}}![[SPFILE:[0-9]+]]
// INTREE: DIFile({{.*}}directory: "{{.+}}CodeGen{{.*}}")
-
-// CURDIR: = distinct !DICompileUnit({{.*}}file: ![[CUFILE:[0-9]+]]
-// CURDIR: ![[CUFILE]] = !DIFile({{.*}}directory: "{{.+}}UNIQUEISH_SENTINEL")
-
diff --git a/clang/test/CodeGen/debug-info-compilation-dir.c b/clang/test/CodeGen/debug-info-compilation-dir.c
index b49a0f5..5f5542c 100644
--- a/clang/test/CodeGen/debug-info-compilation-dir.c
+++ b/clang/test/CodeGen/debug-info-compilation-dir.c
@@ -7,3 +7,10 @@
// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck -check-prefix=CHECK-DIR %s
// CHECK-DIR: CodeGen
+/// Test path remapping.
+// RUN: %clang_cc1 -fdebug-compilation-dir=%S -main-file-name %s -emit-llvm -debug-info-kind=limited %s -o - | FileCheck -check-prefix=CHECK-ABS %s
+// CHECK-ABS: DIFile(filename: "{{.*}}debug-info-compilation-dir.c", directory: "{{.*}}CodeGen")
+
+// RUN: %clang_cc1 -main-file-name %s -emit-llvm -debug-info-kind=limited %s -o - | FileCheck -check-prefix=CHECK-NOMAP %s
+// CHECK-NOMAP: DIFile(filename: "{{.*}}debug-info-compilation-dir.c", directory: "")
+
diff --git a/clang/test/CodeGen/debug-prefix-map.c b/clang/test/CodeGen/debug-prefix-map.c
index e242180..e58909f 100644
--- a/clang/test/CodeGen/debug-prefix-map.c
+++ b/clang/test/CodeGen/debug-prefix-map.c
@@ -12,6 +12,7 @@
// RUN: rm -rf %t && mkdir -p %t/a/b && cp %s %t/a/b/c.c
// RUN: %clang_cc1 -emit-llvm -debug-info-kind=standalone -I%S -fdebug-prefix-map=%t/a/b=y -fdebug-prefix-map=%t/a=x %t/a/b/c.c -o - | FileCheck %s --check-prefix=CHECK-X
// RUN: %clang_cc1 -emit-llvm -debug-info-kind=standalone -I%S -fdebug-prefix-map=%t/a=x -fdebug-prefix-map=%t/a/b=y %t/a/b/c.c -o - | FileCheck %s --check-prefix=CHECK-Y
+// RUN: %clang_cc1 -emit-llvm -debug-info-kind=standalone -I%S -main-file-name %t/a/b/c.c -fdebug-compilation-dir=%t/a -fdebug-prefix-map=%t/a=x -fdebug-prefix-map=%t/a/b=y %t/a/b/c.c -o - | FileCheck %s --check-prefix=CHECK-REMAP-Y
#include "Inputs/stdio.h"
@@ -26,9 +27,9 @@ void test_rewrite_includes(void) {
vprintf("string", argp);
}
-// CHECK-NO-MAIN-FILE-NAME: !DIFile(filename: "{{/|.:\\\\}}UNLIKELY_PATH{{/|\\\\}}empty{{/|\\\\}}<stdin>",
// CHECK-NO-MAIN-FILE-NAME: !DIFile(filename: "{{/|.:\\\\}}UNLIKELY_PATH{{/|\\\\}}empty{{/|\\\\}}{{.*}}",
// CHECK-NO-MAIN-FILE-NAME-SAME: directory: "")
+// CHECK-NO-MAIN-FILE-NAME: !DIFile(filename: "{{/|.:\\\\}}UNLIKELY_PATH{{/|\\\\}}empty{{/|\\\\}}<stdin>",
// CHECK-NO-MAIN-FILE-NAME: !DIFile(filename: "{{/|.:\\\\}}UNLIKELY_PATH{{/|\\\\}}empty{{/|\\\\}}Inputs{{/|\\\\}}stdio.h",
// CHECK-NO-MAIN-FILE-NAME-SAME: directory: "")
// CHECK-NO-MAIN-FILE-NAME-NOT: !DIFile(filename:
@@ -54,3 +55,5 @@ void test_rewrite_includes(void) {
// CHECK-X: !DIFile(filename: "x{{/|\\\\}}b{{/|\\\\}}c.c", directory: "")
// CHECK-Y: !DIFile(filename: "y{{/|\\\\}}c.c", directory: "")
+
+// CHECK-REMAP-Y: !DIFile(filename: "y{{/|\\\\}}c.c", directory: "x")
diff --git a/clang/test/CodeGenCXX/debug-info-function-context.cpp b/clang/test/CodeGenCXX/debug-info-function-context.cpp
index 63fdf87..29c87b6 100644
--- a/clang/test/CodeGenCXX/debug-info-function-context.cpp
+++ b/clang/test/CodeGenCXX/debug-info-function-context.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -triple x86_64-pc-linux-gnu %s \
+// RUN: %clang_cc1 -emit-llvm -debug-info-kind=limited -triple x86_64-pc-linux-gnu %s -fdebug-compilation-dir=%S \
// RUN: -dwarf-version=5 -main-file-name debug-info-function-context.cpp -o - | FileCheck %s
struct C {
diff --git a/clang/test/CodeGenCXX/difile_entry.cpp b/clang/test/CodeGenCXX/difile_entry.cpp
index 8bf6dc3..5fcd56e 100644
--- a/clang/test/CodeGenCXX/difile_entry.cpp
+++ b/clang/test/CodeGenCXX/difile_entry.cpp
@@ -3,7 +3,7 @@
/// Test that we canonicalize the DIFile.
// RUN: rm -rf %t && mkdir %t && cd %t
// RUN: cp %s .
-// RUN: %clang_cc1 -triple %itanium_abi_triple -main-file-name difile_entry.cpp -debug-info-kind=limited %t/difile_entry.cpp -std=c++11 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 -triple %itanium_abi_triple -main-file-name difile_entry.cpp -fdebug-compilation-dir=%t -debug-info-kind=limited %t/difile_entry.cpp -std=c++11 -emit-llvm -o - | FileCheck %s
int x();
static int i = x();
diff --git a/clang/test/CodeGenHLSL/builtins/D3DCOLORtoUBYTE4.hlsl b/clang/test/CodeGenHLSL/builtins/D3DCOLORtoUBYTE4.hlsl
index 990f0aa..3c9e35a 100644
--- a/clang/test/CodeGenHLSL/builtins/D3DCOLORtoUBYTE4.hlsl
+++ b/clang/test/CodeGenHLSL/builtins/D3DCOLORtoUBYTE4.hlsl
@@ -5,8 +5,16 @@
// CHECK-LABEL: D3DCOLORtoUBYTE4
int4 test_D3DCOLORtoUBYTE4(float4 p1) {
// CHECK: %[[SCALED:.*]] = fmul [[FMFLAGS:.*]][[FLOAT_TYPE:<4 x float>]] %{{.*}}, splat (float 0x406FE01000000000)
- // CHECK: %[[CONVERTED:.*]] = fptoui [[FLOAT_TYPE]] %[[SCALED]] to [[INT_TYPE:<4 x i32>]]
+ // CHECK: %[[CONVERTED:.*]] = fptosi [[FLOAT_TYPE]] %[[SCALED]] to [[INT_TYPE:<4 x i32>]]
// CHECK: %[[SHUFFLED:.*]] = shufflevector [[INT_TYPE]] %[[CONVERTED]], [[INT_TYPE]] poison, <4 x i32> <i32 2, i32 1, i32 0, i32 3>
// CHECK: ret [[INT_TYPE]] %[[SHUFFLED]]
return D3DCOLORtoUBYTE4(p1);
}
+
+// This test confirms that issue 150673 is fixed by verifying that the
+// negative input folds to a constant instead of becoming a poison value.
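+// The expected constants follow from scaling each component by
+// 255.001953125 (0x406FE01000000000) and swizzling zyxw: for example,
+// -50.5 * 255.001953125 = -12877.6, which fptosi truncates to -12877 in
+// lane 0.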
+// CHECK-LABEL: test_constant_inputs
+int4 test_constant_inputs() {
+ // CHECK: ret <4 x i32> <i32 -12877, i32 2833, i32 0, i32 25500>
+ return D3DCOLORtoUBYTE4(float4(0, 11.11, -50.5, 100));
+}
diff --git a/clang/test/CodeGenObjC/exceptions.m b/clang/test/CodeGenObjC/exceptions.m
index 1546ed2..832d3a45 100644
--- a/clang/test/CodeGenObjC/exceptions.m
+++ b/clang/test/CodeGenObjC/exceptions.m
@@ -144,18 +144,17 @@ void f4(void) {
// CHECK-NEXT: br label
// -> rethrow
- // finally.call-exit: Predecessors are the @try and @catch fallthroughs
- // as well as the no-match case in the catch mechanism. The i1 is whether
- // to rethrow and should be true only in the last case.
- // CHECK: phi ptr
- // CHECK-NEXT: phi i1
- // CHECK-NEXT: call void @objc_exception_try_exit(ptr nonnull [[EXNDATA]])
+ // finally.call-exit: Predecessor is the no-match case in the catch mechanism
+ // which rethrows.
+ // CHECK: call void @objc_exception_try_exit(ptr nonnull [[EXNDATA]])
// CHECK-NEXT: call void @f4_help(i32 noundef 2)
- // CHECK-NEXT: br i1
- // -> ret, rethrow
+ // CHECK-NEXT: br label
+ // -> rethrow
- // ret:
- // CHECK: ret void
+ // finally.end.critedge: Predecessors are the @try and @catch fallthroughs.
+ // CHECK: call void @objc_exception_try_exit(ptr nonnull [[EXNDATA]])
+ // CHECK-NEXT: call void @f4_help(i32 noundef 2)
+ // CHECK-NEXT: ret void
// Catch mechanism:
// CHECK: call ptr @objc_exception_extract(ptr nonnull [[EXNDATA]])
diff --git a/clang/test/CodeGenOpenCL/amdgpu-features.cl b/clang/test/CodeGenOpenCL/amdgpu-features.cl
index e96dd66..efd70a9 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-features.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-features.cl
@@ -108,7 +108,7 @@
// GFX1153: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1200: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1201: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
-// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+bf16-trans-insts,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
+// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+bf16-cvt-insts,+bf16-trans-insts,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+tanh-insts,+transpose-load-f4f6-insts,+vmem-pref-insts,+wavefrontsize32"
// GFX1103-W64: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize64"
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
index 81f39f9..51ab970 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
@@ -7,7 +7,20 @@
typedef unsigned int uint;
typedef unsigned short int ushort;
typedef unsigned int __attribute__((ext_vector_type(2))) uint2;
+typedef unsigned int __attribute__((ext_vector_type(3))) uint3;
+typedef unsigned int __attribute__((ext_vector_type(4))) uint4;
+typedef __bf16 __attribute__((ext_vector_type(2))) bfloat2;
+typedef __bf16 __attribute__((ext_vector_type(8))) bfloat8;
+typedef __bf16 __attribute__((ext_vector_type(16))) bfloat16;
+typedef __bf16 __attribute__((ext_vector_type(32))) bfloat32;
typedef half __attribute__((ext_vector_type(2))) half2;
+typedef half __attribute__((ext_vector_type(8))) half8;
+typedef half __attribute__((ext_vector_type(16))) half16;
+typedef half __attribute__((ext_vector_type(32))) half32;
+typedef float __attribute__((ext_vector_type(8))) float8;
+typedef float __attribute__((ext_vector_type(16))) float16;
+typedef float __attribute__((ext_vector_type(32))) float32;
+typedef short __attribute__((ext_vector_type(2))) short2;
// CHECK-LABEL: @test_setprio_inc_wg(
// CHECK-NEXT: entry:
@@ -254,6 +267,60 @@ void test_cos_bf16(global __bf16* out, __bf16 a)
*out = __builtin_amdgcn_cos_bf16(a);
}
+// CHECK-LABEL: @test_cvt_sr_pk_bf16_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[SR_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: [[SR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SR_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store float [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store float [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[SR:%.*]], ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float [[TMP0]], float [[TMP1]], i32 [[TMP2]])
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x bfloat> [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: ret void
+//
+void test_cvt_sr_pk_bf16_f32(global bfloat2* out, float a, float b, uint sr)
+{
+ *out = __builtin_amdgcn_cvt_sr_pk_bf16_f32(a, b, sr);
+}
+
+// CHECK-LABEL: @test_cvt_sr_pk_f16_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[SR_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: [[SR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SR_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store float [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store float [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[SR:%.*]], ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float [[TMP0]], float [[TMP1]], i32 [[TMP2]])
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x half> [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: ret void
+//
+void test_cvt_sr_pk_f16_f32(global half2* out, float a, float b, uint sr)
+{
+ *out = __builtin_amdgcn_cvt_sr_pk_f16_f32(a, b, sr);
+}
+
// CHECK-LABEL: @test_cvt_f16_fp8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
@@ -370,6 +437,243 @@ void test_cvt_pk_f16_bf8(global half2* out, short a)
out[0] = __builtin_amdgcn_cvt_pk_f16_bf8(a);
}
+// CHECK-LABEL: @test_cvt_pk_bf8_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x half> [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load <2 x half>, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i16 [[TMP1]], ptr addrspace(1) [[TMP2]], align 2
+// CHECK-NEXT: ret void
+//
+void test_cvt_pk_bf8_f16(global short* out, half2 a)
+{
+ *out = __builtin_amdgcn_cvt_pk_bf8_f16(a);
+}
+
+// CHECK-LABEL: @test_cvt_pk_fp8_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca <2 x half>, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x half> [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load <2 x half>, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> [[TMP0]])
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i16 [[TMP1]], ptr addrspace(1) [[TMP2]], align 2
+// CHECK-NEXT: ret void
+//
+void test_cvt_pk_fp8_f16(global short* out, half2 a)
+{
+ *out = __builtin_amdgcn_cvt_pk_fp8_f16(a);
+}
+
+// CHECK-LABEL: @test_cvt_sr_bf8_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2, addrspace(5)
+// CHECK-NEXT: [[SR_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OLD_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[SR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SR_ADDR]] to ptr
+// CHECK-NEXT: [[OLD_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OLD_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: store i32 [[SR:%.*]], ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[OLD:%.*]], ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 0)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half [[TMP5]], i32 [[TMP6]], i32 [[TMP7]], i32 1)
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP8]], ptr addrspace(1) [[TMP9]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half [[TMP10]], i32 [[TMP11]], i32 [[TMP12]], i32 2)
+// CHECK-NEXT: [[TMP14:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP13]], ptr addrspace(1) [[TMP14]], align 4
+// CHECK-NEXT: [[TMP15:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[SR_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half [[TMP15]], i32 [[TMP16]], i32 [[TMP17]], i32 3)
+// CHECK-NEXT: [[TMP19:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP18]], ptr addrspace(1) [[TMP19]], align 4
+// CHECK-NEXT: ret void
+//
+void test_cvt_sr_bf8_f16(global int* out, half a, uint sr, int old)
+{
+ *out = __builtin_amdgcn_cvt_sr_bf8_f16(a, sr, old, 0);
+ *out = __builtin_amdgcn_cvt_sr_bf8_f16(a, sr, old, 1);
+ *out = __builtin_amdgcn_cvt_sr_bf8_f16(a, sr, old, 2);
+ *out = __builtin_amdgcn_cvt_sr_bf8_f16(a, sr, old, 3);
+}
+
+// CHECK-LABEL: @test_cvt_sr_fp8_f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca half, align 2, addrspace(5)
+// CHECK-NEXT: [[SR_ADDR:%.*]] = alloca i16, align 2, addrspace(5)
+// CHECK-NEXT: [[OLD_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[SR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SR_ADDR]] to ptr
+// CHECK-NEXT: [[OLD_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OLD_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store half [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: store i16 [[SR:%.*]], ptr [[SR_ADDR_ASCAST]], align 2
+// CHECK-NEXT: store i32 [[OLD:%.*]], ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[SR_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half [[TMP0]], i32 [[CONV]], i32 [[TMP2]], i32 0)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP6:%.*]] = load i16, ptr [[SR_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[CONV1:%.*]] = sext i16 [[TMP6]] to i32
+// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half [[TMP5]], i32 [[CONV1]], i32 [[TMP7]], i32 1)
+// CHECK-NEXT: [[TMP9:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP8]], ptr addrspace(1) [[TMP9]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP11:%.*]] = load i16, ptr [[SR_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[TMP11]] to i32
+// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half [[TMP10]], i32 [[CONV2]], i32 [[TMP12]], i32 2)
+// CHECK-NEXT: [[TMP14:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP13]], ptr addrspace(1) [[TMP14]], align 4
+// CHECK-NEXT: [[TMP15:%.*]] = load half, ptr [[A_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[TMP16:%.*]] = load i16, ptr [[SR_ADDR_ASCAST]], align 2
+// CHECK-NEXT: [[CONV3:%.*]] = sext i16 [[TMP16]] to i32
+// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP18:%.*]] = call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half [[TMP15]], i32 [[CONV3]], i32 [[TMP17]], i32 3)
+// CHECK-NEXT: [[TMP19:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP18]], ptr addrspace(1) [[TMP19]], align 4
+// CHECK-NEXT: ret void
+//
+void test_cvt_sr_fp8_f16(global int* out, half a, short sr, int old)
+{
+ *out = __builtin_amdgcn_cvt_sr_fp8_f16(a, sr, old, 0);
+ *out = __builtin_amdgcn_cvt_sr_fp8_f16(a, sr, old, 1);
+ *out = __builtin_amdgcn_cvt_sr_fp8_f16(a, sr, old, 2);
+ *out = __builtin_amdgcn_cvt_sr_fp8_f16(a, sr, old, 3);
+}
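+
+// In the two tests above, the trailing immediate of the cvt_sr_{bf8,fp8}_f16
+// builtins appears to select which byte of 'old' receives the 8-bit result
+// (byte indices 0 through 3), matching the final i32 operand of the
+// generated intrinsic calls.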
+
+// CHECK-LABEL: @test_cvt_scale_pk(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUTH8_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OUTY8_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[SRC2_ADDR:%.*]] = alloca <2 x i32>, align 8, addrspace(5)
+// CHECK-NEXT: [[OUTF32_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OUTF8_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OUTH16_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OUTY16_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OUTF16_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[SRC3_ADDR:%.*]] = alloca <3 x i32>, align 16, addrspace(5)
+// CHECK-NEXT: [[SRC1_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[SCALE_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUTH8_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUTH8_ADDR]] to ptr
+// CHECK-NEXT: [[OUTY8_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUTY8_ADDR]] to ptr
+// CHECK-NEXT: [[SRC2_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC2_ADDR]] to ptr
+// CHECK-NEXT: [[OUTF32_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUTF32_ADDR]] to ptr
+// CHECK-NEXT: [[OUTF8_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUTF8_ADDR]] to ptr
+// CHECK-NEXT: [[OUTH16_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUTH16_ADDR]] to ptr
+// CHECK-NEXT: [[OUTY16_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUTY16_ADDR]] to ptr
+// CHECK-NEXT: [[OUTF16_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUTF16_ADDR]] to ptr
+// CHECK-NEXT: [[SRC3_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC3_ADDR]] to ptr
+// CHECK-NEXT: [[SRC1_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC1_ADDR]] to ptr
+// CHECK-NEXT: [[SCALE_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SCALE_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUTH8:%.*]], ptr [[OUTH8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store ptr addrspace(1) [[OUTY8:%.*]], ptr [[OUTY8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <2 x i32> [[SRC2:%.*]], ptr [[SRC2_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store ptr addrspace(1) [[OUTF32:%.*]], ptr [[OUTF32_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store ptr addrspace(1) [[OUTF8:%.*]], ptr [[OUTF8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store ptr addrspace(1) [[OUTH16:%.*]], ptr [[OUTH16_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store ptr addrspace(1) [[OUTY16:%.*]], ptr [[OUTY16_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store ptr addrspace(1) [[OUTF16:%.*]], ptr [[OUTF16_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <3 x i32> [[SRC3:%.*]], ptr [[SRC3_ADDR_ASCAST]], align 16
+// CHECK-NEXT: store i32 [[SRC1:%.*]], ptr [[SRC1_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[SCALE:%.*]], ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr [[SRC2_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> [[TMP0]], i32 [[TMP1]], i32 4)
+// CHECK-NEXT: [[TMP3:%.*]] = load ptr addrspace(1), ptr [[OUTH8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x half> [[TMP2]], ptr addrspace(1) [[TMP3]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = load <2 x i32>, ptr [[SRC2_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> [[TMP4]], i32 [[TMP5]], i32 5)
+// CHECK-NEXT: [[TMP7:%.*]] = load ptr addrspace(1), ptr [[OUTY8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x bfloat> [[TMP6]], ptr addrspace(1) [[TMP7]], align 16
+// CHECK-NEXT: [[TMP8:%.*]] = load <2 x i32>, ptr [[SRC2_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> [[TMP8]], i32 [[TMP9]], i32 6)
+// CHECK-NEXT: [[TMP11:%.*]] = load ptr addrspace(1), ptr [[OUTH8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x half> [[TMP10]], ptr addrspace(1) [[TMP11]], align 16
+// CHECK-NEXT: [[TMP12:%.*]] = load <2 x i32>, ptr [[SRC2_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP14:%.*]] = call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> [[TMP12]], i32 [[TMP13]], i32 7)
+// CHECK-NEXT: [[TMP15:%.*]] = load ptr addrspace(1), ptr [[OUTY8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x bfloat> [[TMP14]], ptr addrspace(1) [[TMP15]], align 16
+// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[SRC1_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP18:%.*]] = call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 [[TMP16]], i32 [[TMP17]], i32 1)
+// CHECK-NEXT: [[TMP19:%.*]] = load ptr addrspace(1), ptr [[OUTH8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x half> [[TMP18]], ptr addrspace(1) [[TMP19]], align 16
+// CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[SRC1_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP22:%.*]] = call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 [[TMP20]], i32 [[TMP21]], i32 2)
+// CHECK-NEXT: [[TMP23:%.*]] = load ptr addrspace(1), ptr [[OUTY8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x bfloat> [[TMP22]], ptr addrspace(1) [[TMP23]], align 16
+// CHECK-NEXT: [[TMP24:%.*]] = load <2 x i32>, ptr [[SRC2_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP26:%.*]] = call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> [[TMP24]], i32 [[TMP25]], i32 5)
+// CHECK-NEXT: [[TMP27:%.*]] = load ptr addrspace(1), ptr [[OUTF8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x float> [[TMP26]], ptr addrspace(1) [[TMP27]], align 32
+// CHECK-NEXT: [[TMP28:%.*]] = load <2 x i32>, ptr [[SRC2_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP30:%.*]] = call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> [[TMP28]], i32 [[TMP29]], i32 6)
+// CHECK-NEXT: [[TMP31:%.*]] = load ptr addrspace(1), ptr [[OUTF8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x float> [[TMP30]], ptr addrspace(1) [[TMP31]], align 32
+// CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[SRC1_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[SCALE_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP34:%.*]] = call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 [[TMP32]], i32 [[TMP33]], i32 7)
+// CHECK-NEXT: [[TMP35:%.*]] = load ptr addrspace(1), ptr [[OUTF8_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store <8 x float> [[TMP34]], ptr addrspace(1) [[TMP35]], align 32
+// CHECK-NEXT: ret void
+//
+void test_cvt_scale_pk(global half8 *outh8, global bfloat8 *outy8, uint2 src2,
+ global float32 *outf32, global float8 *outf8,
+ global half16 *outh16, global bfloat16 *outy16,
+ global float16 *outf16, uint3 src3,
+ uint src1, uint scale)
+{
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_fp8(src2, scale, 4);
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_fp8(src2, scale, 5);
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_bf8(src2, scale, 6);
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_bf8(src2, scale, 7);
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_fp4(src1, scale, 1);
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_fp4(src1, scale, 2);
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_fp8(src2, scale, 5);
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_bf8(src2, scale, 6);
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_fp4(src1, scale, 7);
+}
+
// CHECK-LABEL: @test_sat_pk4_i4_i8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
@@ -459,6 +763,60 @@ void test_prefetch(generic void *fptr, global void *gptr) {
__builtin_amdgcn_global_prefetch(gptr, 8);
}
+// CHECK-LABEL: @test_cvt_pk_fp8_f32_e5m3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OLD_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[OLD_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OLD_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[OLD:%.*]], ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store float [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store float [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float [[TMP0]], float [[TMP1]], i32 [[TMP2]], i1 true)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: ret void
+//
+void test_cvt_pk_fp8_f32_e5m3(global int* out, int old, float a, float b)
+{
+ *out = __builtin_amdgcn_cvt_pk_fp8_f32_e5m3(a, b, old, true);
+}
+
+// CHECK-LABEL: @test_cvt_sr_fp8_f32_e5m3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[OLD_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[OLD_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OLD_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[OLD:%.*]], ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store float [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[OLD_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 3)
+// CHECK-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(1) [[TMP4]], align 4
+// CHECK-NEXT: ret void
+//
+void test_cvt_sr_fp8_f32_e5m3(global int* out, int old, float a, int b)
+{
+ *out = __builtin_amdgcn_cvt_sr_fp8_f32_e5m3(a, b, old, 3);
+}
+
// CHECK-LABEL: @test_cvt_f32_fp8_e5m3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
diff --git a/clang/test/Driver/baremetal.cpp b/clang/test/Driver/baremetal.cpp
index 8b5ab43..26f030d 100644
--- a/clang/test/Driver/baremetal.cpp
+++ b/clang/test/Driver/baremetal.cpp
@@ -163,6 +163,16 @@
// RUN: | FileCheck %s --check-prefix=CHECK-RTLIB-GCC
// CHECK-RTLIB-GCC: -lgcc
+// RUN: %clang -### --target=arm-none-eabi -nolibc -rtlib=compiler-rt %s 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK-NOLIBC
+// CHECK-NOLIBC-NOT: "-lc"
+// CHECK-NOLIBC: "{{[^"]*}}libclang_rt.builtins.a"
+
+// RUN: %clang -### --target=arm-none-eabi -nostdlib -rtlib=compiler-rt %s 2>&1 \
+// RUN: | FileCheck %s --check-prefix=CHECK-NOSTDLIB
+// CHECK-NOSTDLIB-NOT: "-lc"
+// CHECK-NOSTDLIB-NOT: "{{[^"]*}}libclang_rt.builtins.a"
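+
+// Note: -nolibc drops only -lc while the compiler-rt builtins archive is
+// still linked; -nostdlib suppresses both, as the checks above verify.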
+
// RUN: %clang -### --target=arm-none-eabi -v %s 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-SYSROOT-INC
// CHECK-SYSROOT-INC-NOT: "-internal-isystem" "include"
diff --git a/clang/test/Driver/compilation-dir.c b/clang/test/Driver/compilation-dir.c
index dbe801c..70a117b 100644
--- a/clang/test/Driver/compilation-dir.c
+++ b/clang/test/Driver/compilation-dir.c
@@ -8,3 +8,8 @@
// RUN: %clang -### -integrated-as -ffile-compilation-dir=. -x assembler %s 2>&1 | FileCheck -check-prefixes=CHECK-DEBUG-COMPILATION-DIR %s
// CHECK-DEBUG-COMPILATION-DIR: "-fdebug-compilation-dir=."
// CHECK-DEBUG-COMPILATION-DIR-NOT: "-ffile-compilation-dir=."
+
+// RUN: %clang -### -S %s -working-directory %S 2>&1 | FileCheck -check-prefix=CHECK-CWD %s
+// RUN: cd %S
+// RUN: %clang -### -S %s 2>&1 | FileCheck -check-prefix=CHECK-CWD %s
+// CHECK-CWD: -fdebug-compilation-dir={{.*}}Driver
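+// Whether the directory comes from -working-directory or from the process
+// working directory after the cd above, the driver derives the same
+// -fdebug-compilation-dir value pointing at the Driver test directory.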
diff --git a/clang/test/Driver/wasm-features.c b/clang/test/Driver/wasm-features.c
index 746bd7b..f0215ec 100644
--- a/clang/test/Driver/wasm-features.c
+++ b/clang/test/Driver/wasm-features.c
@@ -41,6 +41,12 @@
// HALF-PRECISION: "-target-feature" "+fp16"
// NO-HALF-PRECISION: "-target-feature" "-fp16"
+// RUN: %clang --target=wasm32-unknown-unknown -### %s -mgc 2>&1 | FileCheck %s -check-prefix=GC
+// RUN: %clang --target=wasm32-unknown-unknown -### %s -mno-gc 2>&1 | FileCheck %s -check-prefix=NO-GC
+
+// GC: "-target-feature" "+gc"
+// NO-GC: "-target-feature" "-gc"
+
// RUN: %clang --target=wasm32-unknown-unknown -### %s -mmultimemory 2>&1 | FileCheck %s -check-prefix=MULTIMEMORY
// RUN: %clang --target=wasm32-unknown-unknown -### %s -mno-multimemory 2>&1 | FileCheck %s -check-prefix=NO-MULTIMEMORY
diff --git a/clang/test/Frontend/dump-minimization-hints.cpp b/clang/test/Frontend/dump-minimization-hints.cpp
index 4843786..273fd7f 100644
--- a/clang/test/Frontend/dump-minimization-hints.cpp
+++ b/clang/test/Frontend/dump-minimization-hints.cpp
@@ -39,6 +39,26 @@
// RANGE-NEXT: "line": 15,
// RANGE-NEXT: "column": 2
// RANGE-NEXT: }
+// RANGE-NEXT: },
+// RANGE-NEXT: {
+// RANGE-NEXT: "from": {
+// RANGE-NEXT: "line": 19,
+// RANGE-NEXT: "column": 1
+// RANGE-NEXT: },
+// RANGE-NEXT: "to": {
+// RANGE-NEXT: "line": 19,
+// RANGE-NEXT: "column": 41
+// RANGE-NEXT: }
+// RANGE-NEXT: },
+// RANGE-NEXT: {
+// RANGE-NEXT: "from": {
+// RANGE-NEXT: "line": 20,
+// RANGE-NEXT: "column": 1
+// RANGE-NEXT: },
+// RANGE-NEXT: "to": {
+// RANGE-NEXT: "line": 23,
+// RANGE-NEXT: "column": 2
+// RANGE-NEXT: }
// RANGE-NEXT: }
// RANGE-NEXT: ]
// RANGE-NEXT: }
@@ -68,6 +88,16 @@ int multiply(int a, int b) {
return a * b;
}
+inline int unused_by_foo() { return 0; } // line 17
+
+inline void recursively_used_by_foo() {} // line 19
+inline int used_by_foo() { // line 20
+ recursively_used_by_foo();
+ return 1;
+}
+
+struct UnusedByFoo {};
+
//--- foo.cpp
#include "foo.h"
int global_value = 5;
@@ -76,4 +106,6 @@ int main() {
int current_value = data.getValue();
int doubled_value = multiply(current_value, 2);
int final_result = doubled_value + global_value;
+
+ return used_by_foo();
}
diff --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip
index d31ca84..81c5f43 100644
--- a/clang/test/Headers/__clang_hip_math.hip
+++ b/clang/test/Headers/__clang_hip_math.hip
@@ -49,30 +49,27 @@ typedef unsigned long long uint64_t;
// CHECK-LABEL: @test___make_mantissa_base8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: br label [[WHILE_COND_I:%.*]]
-// CHECK: while.cond.i:
-// CHECK-NEXT: [[__TAGP_ADDR_0_I:%.*]] = phi ptr [ [[P:%.*]], [[ENTRY:%.*]] ], [ [[__TAGP_ADDR_1_I:%.*]], [[CLEANUP_I:%.*]] ]
-// CHECK-NEXT: [[__R_0_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[__R_1_I:%.*]], [[CLEANUP_I]] ]
-// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I]], align 1, !tbaa [[TBAA4:![0-9]+]]
-// CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP0]], 0
-// CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL21__MAKE_MANTISSA_BASE8PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P:%.*]], align 1, !tbaa [[TBAA4:![0-9]+]]
+// CHECK-NEXT: [[CMP_NOT_I1:%.*]] = icmp eq i8 [[TMP0]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I1]], label [[_ZL21__MAKE_MANTISSA_BASE8PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
// CHECK: while.body.i:
-// CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], -8
-// CHECK-NEXT: [[OR_COND_I:%.*]] = icmp eq i8 [[TMP1]], 48
-// CHECK-NEXT: br i1 [[OR_COND_I]], label [[IF_THEN_I:%.*]], label [[CLEANUP_I]]
+// CHECK-NEXT: [[TMP1:%.*]] = phi i8 [ [[TMP3:%.*]], [[IF_THEN_I:%.*]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
+// CHECK-NEXT: [[__R_0_I3:%.*]] = phi i64 [ [[SUB_I:%.*]], [[IF_THEN_I]] ], [ 0, [[ENTRY]] ]
+// CHECK-NEXT: [[__TAGP_ADDR_0_I2:%.*]] = phi ptr [ [[INCDEC_PTR_I:%.*]], [[IF_THEN_I]] ], [ [[P]], [[ENTRY]] ]
+// CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], -8
+// CHECK-NEXT: [[OR_COND_I:%.*]] = icmp eq i8 [[TMP2]], 48
+// CHECK-NEXT: br i1 [[OR_COND_I]], label [[IF_THEN_I]], label [[_ZL21__MAKE_MANTISSA_BASE8PKC_EXIT]]
// CHECK: if.then.i:
-// CHECK-NEXT: [[MUL_I:%.*]] = shl i64 [[__R_0_I]], 3
-// CHECK-NEXT: [[CONV5_I:%.*]] = zext nneg i8 [[TMP0]] to i64
+// CHECK-NEXT: [[MUL_I:%.*]] = shl i64 [[__R_0_I3]], 3
+// CHECK-NEXT: [[CONV5_I:%.*]] = zext nneg i8 [[TMP1]] to i64
// CHECK-NEXT: [[ADD_I:%.*]] = add i64 [[MUL_I]], -48
-// CHECK-NEXT: [[SUB_I:%.*]] = add i64 [[ADD_I]], [[CONV5_I]]
-// CHECK-NEXT: [[INCDEC_PTR_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I]], i64 1
-// CHECK-NEXT: br label [[CLEANUP_I]]
-// CHECK: cleanup.i:
-// CHECK-NEXT: [[__TAGP_ADDR_1_I]] = phi ptr [ [[INCDEC_PTR_I]], [[IF_THEN_I]] ], [ [[__TAGP_ADDR_0_I]], [[WHILE_BODY_I]] ]
-// CHECK-NEXT: [[__R_1_I]] = phi i64 [ [[SUB_I]], [[IF_THEN_I]] ], [ [[__R_0_I]], [[WHILE_BODY_I]] ]
-// CHECK-NEXT: br i1 [[OR_COND_I]], label [[WHILE_COND_I]], label [[_ZL21__MAKE_MANTISSA_BASE8PKC_EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
+// CHECK-NEXT: [[SUB_I]] = add i64 [[ADD_I]], [[CONV5_I]]
+// CHECK-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I2]], i64 1
+// CHECK-NEXT: [[TMP3]] = load i8, ptr [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP3]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL21__MAKE_MANTISSA_BASE8PKC_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK: _ZL21__make_mantissa_base8PKc.exit:
-// CHECK-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[CLEANUP_I]] ], [ [[__R_0_I]], [[WHILE_COND_I]] ]
+// CHECK-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[WHILE_BODY_I]] ], [ [[SUB_I]], [[IF_THEN_I]] ]
// CHECK-NEXT: ret i64 [[RETVAL_2_I]]
//
// AMDGCNSPIRV-LABEL: @test___make_mantissa_base8(
@@ -105,30 +102,27 @@ extern "C" __device__ uint64_t test___make_mantissa_base8(const char *p) {
// CHECK-LABEL: @test___make_mantissa_base10(
// CHECK-NEXT: entry:
-// CHECK-NEXT: br label [[WHILE_COND_I:%.*]]
-// CHECK: while.cond.i:
-// CHECK-NEXT: [[__TAGP_ADDR_0_I:%.*]] = phi ptr [ [[P:%.*]], [[ENTRY:%.*]] ], [ [[__TAGP_ADDR_1_I:%.*]], [[CLEANUP_I:%.*]] ]
-// CHECK-NEXT: [[__R_0_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[__R_1_I:%.*]], [[CLEANUP_I]] ]
-// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I]], align 1, !tbaa [[TBAA4]]
-// CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP0]], 0
-// CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL22__MAKE_MANTISSA_BASE10PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P:%.*]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I1:%.*]] = icmp eq i8 [[TMP0]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I1]], label [[_ZL22__MAKE_MANTISSA_BASE10PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
// CHECK: while.body.i:
-// CHECK-NEXT: [[TMP1:%.*]] = add i8 [[TMP0]], -48
-// CHECK-NEXT: [[OR_COND_I:%.*]] = icmp ult i8 [[TMP1]], 10
-// CHECK-NEXT: br i1 [[OR_COND_I]], label [[IF_THEN_I:%.*]], label [[CLEANUP_I]]
+// CHECK-NEXT: [[TMP1:%.*]] = phi i8 [ [[TMP3:%.*]], [[IF_THEN_I:%.*]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
+// CHECK-NEXT: [[__R_0_I3:%.*]] = phi i64 [ [[SUB_I:%.*]], [[IF_THEN_I]] ], [ 0, [[ENTRY]] ]
+// CHECK-NEXT: [[__TAGP_ADDR_0_I2:%.*]] = phi ptr [ [[INCDEC_PTR_I:%.*]], [[IF_THEN_I]] ], [ [[P]], [[ENTRY]] ]
+// CHECK-NEXT: [[TMP2:%.*]] = add i8 [[TMP1]], -48
+// CHECK-NEXT: [[OR_COND_I:%.*]] = icmp ult i8 [[TMP2]], 10
+// CHECK-NEXT: br i1 [[OR_COND_I]], label [[IF_THEN_I]], label [[_ZL22__MAKE_MANTISSA_BASE10PKC_EXIT]]
// CHECK: if.then.i:
-// CHECK-NEXT: [[MUL_I:%.*]] = mul i64 [[__R_0_I]], 10
-// CHECK-NEXT: [[CONV5_I:%.*]] = zext nneg i8 [[TMP0]] to i64
+// CHECK-NEXT: [[MUL_I:%.*]] = mul i64 [[__R_0_I3]], 10
+// CHECK-NEXT: [[CONV5_I:%.*]] = zext nneg i8 [[TMP1]] to i64
// CHECK-NEXT: [[ADD_I:%.*]] = add i64 [[MUL_I]], -48
-// CHECK-NEXT: [[SUB_I:%.*]] = add i64 [[ADD_I]], [[CONV5_I]]
-// CHECK-NEXT: [[INCDEC_PTR_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I]], i64 1
-// CHECK-NEXT: br label [[CLEANUP_I]]
-// CHECK: cleanup.i:
-// CHECK-NEXT: [[__TAGP_ADDR_1_I]] = phi ptr [ [[INCDEC_PTR_I]], [[IF_THEN_I]] ], [ [[__TAGP_ADDR_0_I]], [[WHILE_BODY_I]] ]
-// CHECK-NEXT: [[__R_1_I]] = phi i64 [ [[SUB_I]], [[IF_THEN_I]] ], [ [[__R_0_I]], [[WHILE_BODY_I]] ]
-// CHECK-NEXT: br i1 [[OR_COND_I]], label [[WHILE_COND_I]], label [[_ZL22__MAKE_MANTISSA_BASE10PKC_EXIT]], !llvm.loop [[LOOP10:![0-9]+]]
+// CHECK-NEXT: [[SUB_I]] = add i64 [[ADD_I]], [[CONV5_I]]
+// CHECK-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I2]], i64 1
+// CHECK-NEXT: [[TMP3]] = load i8, ptr [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP3]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL22__MAKE_MANTISSA_BASE10PKC_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK: _ZL22__make_mantissa_base10PKc.exit:
-// CHECK-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[CLEANUP_I]] ], [ [[__R_0_I]], [[WHILE_COND_I]] ]
+// CHECK-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[WHILE_BODY_I]] ], [ [[SUB_I]], [[IF_THEN_I]] ]
// CHECK-NEXT: ret i64 [[RETVAL_2_I]]
//
// AMDGCNSPIRV-LABEL: @test___make_mantissa_base10(
@@ -161,78 +155,70 @@ extern "C" __device__ uint64_t test___make_mantissa_base10(const char *p) {
// CHECK-LABEL: @test___make_mantissa_base16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: br label [[WHILE_COND_I:%.*]]
-// CHECK: while.cond.i:
-// CHECK-NEXT: [[__TAGP_ADDR_0_I:%.*]] = phi ptr [ [[P:%.*]], [[ENTRY:%.*]] ], [ [[__TAGP_ADDR_1_I:%.*]], [[CLEANUP_I:%.*]] ]
-// CHECK-NEXT: [[__R_0_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[__R_2_I:%.*]], [[CLEANUP_I]] ]
-// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I]], align 1, !tbaa [[TBAA4]]
-// CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP0]], 0
-// CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P:%.*]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I1:%.*]] = icmp eq i8 [[TMP0]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I1]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
// CHECK: while.body.i:
-// CHECK-NEXT: [[TMP1:%.*]] = add i8 [[TMP0]], -48
-// CHECK-NEXT: [[OR_COND_I:%.*]] = icmp ult i8 [[TMP1]], 10
-// CHECK-NEXT: br i1 [[OR_COND_I]], label [[IF_END31_I:%.*]], label [[IF_ELSE_I:%.*]]
+// CHECK-NEXT: [[TMP1:%.*]] = phi i8 [ [[TMP5:%.*]], [[IF_END31_I:%.*]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
+// CHECK-NEXT: [[__R_0_I3:%.*]] = phi i64 [ [[ADD28_I:%.*]], [[IF_END31_I]] ], [ 0, [[ENTRY]] ]
+// CHECK-NEXT: [[__TAGP_ADDR_0_I2:%.*]] = phi ptr [ [[INCDEC_PTR_I:%.*]], [[IF_END31_I]] ], [ [[P]], [[ENTRY]] ]
+// CHECK-NEXT: [[TMP2:%.*]] = add i8 [[TMP1]], -48
+// CHECK-NEXT: [[OR_COND_I:%.*]] = icmp ult i8 [[TMP2]], 10
+// CHECK-NEXT: br i1 [[OR_COND_I]], label [[IF_END31_I]], label [[IF_ELSE_I:%.*]]
// CHECK: if.else.i:
-// CHECK-NEXT: [[TMP2:%.*]] = add i8 [[TMP0]], -97
-// CHECK-NEXT: [[OR_COND33_I:%.*]] = icmp ult i8 [[TMP2]], 6
+// CHECK-NEXT: [[TMP3:%.*]] = add i8 [[TMP1]], -97
+// CHECK-NEXT: [[OR_COND33_I:%.*]] = icmp ult i8 [[TMP3]], 6
// CHECK-NEXT: br i1 [[OR_COND33_I]], label [[IF_END31_I]], label [[IF_ELSE17_I:%.*]]
// CHECK: if.else17.i:
-// CHECK-NEXT: [[TMP3:%.*]] = add i8 [[TMP0]], -65
-// CHECK-NEXT: [[OR_COND34_I:%.*]] = icmp ult i8 [[TMP3]], 6
-// CHECK-NEXT: br i1 [[OR_COND34_I]], label [[IF_END31_I]], label [[CLEANUP_I]]
+// CHECK-NEXT: [[TMP4:%.*]] = add i8 [[TMP1]], -65
+// CHECK-NEXT: [[OR_COND34_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// CHECK-NEXT: br i1 [[OR_COND34_I]], label [[IF_END31_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT]]
// CHECK: if.end31.i:
// CHECK-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I]] ], [ -87, [[IF_ELSE_I]] ], [ -55, [[IF_ELSE17_I]] ]
-// CHECK-NEXT: [[MUL24_I:%.*]] = shl i64 [[__R_0_I]], 4
-// CHECK-NEXT: [[CONV25_I:%.*]] = zext nneg i8 [[TMP0]] to i64
+// CHECK-NEXT: [[MUL24_I:%.*]] = shl i64 [[__R_0_I3]], 4
+// CHECK-NEXT: [[CONV25_I:%.*]] = zext nneg i8 [[TMP1]] to i64
// CHECK-NEXT: [[ADD26_I:%.*]] = add i64 [[MUL24_I]], [[DOTSINK]]
-// CHECK-NEXT: [[ADD28_I:%.*]] = add i64 [[ADD26_I]], [[CONV25_I]]
-// CHECK-NEXT: [[INCDEC_PTR_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I]], i64 1
-// CHECK-NEXT: br label [[CLEANUP_I]]
-// CHECK: cleanup.i:
-// CHECK-NEXT: [[__TAGP_ADDR_1_I]] = phi ptr [ [[INCDEC_PTR_I]], [[IF_END31_I]] ], [ [[__TAGP_ADDR_0_I]], [[IF_ELSE17_I]] ]
-// CHECK-NEXT: [[__R_2_I]] = phi i64 [ [[ADD28_I]], [[IF_END31_I]] ], [ [[__R_0_I]], [[IF_ELSE17_I]] ]
-// CHECK-NEXT: [[COND_I:%.*]] = phi i1 [ true, [[IF_END31_I]] ], [ false, [[IF_ELSE17_I]] ]
-// CHECK-NEXT: br i1 [[COND_I]], label [[WHILE_COND_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT]], !llvm.loop [[LOOP11:![0-9]+]]
+// CHECK-NEXT: [[ADD28_I]] = add i64 [[ADD26_I]], [[CONV25_I]]
+// CHECK-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I2]], i64 1
+// CHECK-NEXT: [[TMP5]] = load i8, ptr [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP5]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK: _ZL22__make_mantissa_base16PKc.exit:
-// CHECK-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[CLEANUP_I]] ], [ [[__R_0_I]], [[WHILE_COND_I]] ]
+// CHECK-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[IF_ELSE17_I]] ], [ [[ADD28_I]], [[IF_END31_I]] ]
// CHECK-NEXT: ret i64 [[RETVAL_2_I]]
//
// AMDGCNSPIRV-LABEL: @test___make_mantissa_base16(
// AMDGCNSPIRV-NEXT: entry:
-// AMDGCNSPIRV-NEXT: br label [[WHILE_COND_I:%.*]]
-// AMDGCNSPIRV: while.cond.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I:%.*]] = phi ptr addrspace(4) [ [[P:%.*]], [[ENTRY:%.*]] ], [ [[__TAGP_ADDR_1_I:%.*]], [[CLEANUP_I:%.*]] ]
-// AMDGCNSPIRV-NEXT: [[__R_0_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[__R_2_I:%.*]], [[CLEANUP_I]] ]
-// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP0]], 0
-// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
+// AMDGCNSPIRV-NEXT: [[TMP0:%.*]] = load i8, ptr addrspace(4) [[P:%.*]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I1:%.*]] = icmp eq i8 [[TMP0]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I1]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT:%.*]], label [[WHILE_BODY_I:%.*]]
// AMDGCNSPIRV: while.body.i:
-// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = add i8 [[TMP0]], -48
-// AMDGCNSPIRV-NEXT: [[OR_COND_I:%.*]] = icmp ult i8 [[TMP1]], 10
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I]], label [[IF_END31_I:%.*]], label [[IF_ELSE_I:%.*]]
+// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = phi i8 [ [[TMP5:%.*]], [[IF_END31_I:%.*]] ], [ [[TMP0]], [[ENTRY:%.*]] ]
+// AMDGCNSPIRV-NEXT: [[__R_0_I3:%.*]] = phi i64 [ [[ADD28_I:%.*]], [[IF_END31_I]] ], [ 0, [[ENTRY]] ]
+// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I2:%.*]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I:%.*]], [[IF_END31_I]] ], [ [[P]], [[ENTRY]] ]
+// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = add i8 [[TMP1]], -48
+// AMDGCNSPIRV-NEXT: [[OR_COND_I:%.*]] = icmp ult i8 [[TMP2]], 10
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I]], label [[IF_END31_I]], label [[IF_ELSE_I:%.*]]
// AMDGCNSPIRV: if.else.i:
-// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = add i8 [[TMP0]], -97
-// AMDGCNSPIRV-NEXT: [[OR_COND33_I:%.*]] = icmp ult i8 [[TMP2]], 6
+// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = add i8 [[TMP1]], -97
+// AMDGCNSPIRV-NEXT: [[OR_COND33_I:%.*]] = icmp ult i8 [[TMP3]], 6
// AMDGCNSPIRV-NEXT: br i1 [[OR_COND33_I]], label [[IF_END31_I]], label [[IF_ELSE17_I:%.*]]
// AMDGCNSPIRV: if.else17.i:
-// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = add i8 [[TMP0]], -65
-// AMDGCNSPIRV-NEXT: [[OR_COND34_I:%.*]] = icmp ult i8 [[TMP3]], 6
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I]], label [[IF_END31_I]], label [[CLEANUP_I]]
+// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = add i8 [[TMP1]], -65
+// AMDGCNSPIRV-NEXT: [[OR_COND34_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I]], label [[IF_END31_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT]]
// AMDGCNSPIRV: if.end31.i:
// AMDGCNSPIRV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I]] ], [ -87, [[IF_ELSE_I]] ], [ -55, [[IF_ELSE17_I]] ]
-// AMDGCNSPIRV-NEXT: [[MUL24_I:%.*]] = shl i64 [[__R_0_I]], 4
-// AMDGCNSPIRV-NEXT: [[CONV25_I:%.*]] = zext nneg i8 [[TMP0]] to i64
+// AMDGCNSPIRV-NEXT: [[MUL24_I:%.*]] = shl i64 [[__R_0_I3]], 4
+// AMDGCNSPIRV-NEXT: [[CONV25_I:%.*]] = zext nneg i8 [[TMP1]] to i64
// AMDGCNSPIRV-NEXT: [[ADD26_I:%.*]] = add i64 [[MUL24_I]], [[DOTSINK]]
-// AMDGCNSPIRV-NEXT: [[ADD28_I:%.*]] = add i64 [[ADD26_I]], [[CONV25_I]]
-// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I]], i64 1
-// AMDGCNSPIRV-NEXT: br label [[CLEANUP_I]]
-// AMDGCNSPIRV: cleanup.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I]], [[IF_END31_I]] ], [ [[__TAGP_ADDR_0_I]], [[IF_ELSE17_I]] ]
-// AMDGCNSPIRV-NEXT: [[__R_2_I]] = phi i64 [ [[ADD28_I]], [[IF_END31_I]] ], [ [[__R_0_I]], [[IF_ELSE17_I]] ]
-// AMDGCNSPIRV-NEXT: [[COND_I:%.*]] = phi i1 [ true, [[IF_END31_I]] ], [ false, [[IF_ELSE17_I]] ]
-// AMDGCNSPIRV-NEXT: br i1 [[COND_I]], label [[WHILE_COND_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT]], !llvm.loop [[LOOP12:![0-9]+]]
+// AMDGCNSPIRV-NEXT: [[ADD28_I]] = add i64 [[ADD26_I]], [[CONV25_I]]
+// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I2]], i64 1
+// AMDGCNSPIRV-NEXT: [[TMP5]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I:%.*]] = icmp eq i8 [[TMP5]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I]], label [[_ZL22__MAKE_MANTISSA_BASE16PKC_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP12:![0-9]+]]
// AMDGCNSPIRV: _ZL22__make_mantissa_base16PKc.exit:
-// AMDGCNSPIRV-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[CLEANUP_I]] ], [ [[__R_0_I]], [[WHILE_COND_I]] ]
+// AMDGCNSPIRV-NEXT: [[RETVAL_2_I:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ 0, [[IF_ELSE17_I]] ], [ [[ADD28_I]], [[IF_END31_I]] ]
// AMDGCNSPIRV-NEXT: ret i64 [[RETVAL_2_I]]
//
extern "C" __device__ uint64_t test___make_mantissa_base16(const char *p) {
@@ -243,91 +229,85 @@ extern "C" __device__ uint64_t test___make_mantissa_base16(const char *p) {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[P:%.*]], align 1, !tbaa [[TBAA4]]
// CHECK-NEXT: [[CMP_I:%.*]] = icmp eq i8 [[TMP0]], 48
-// CHECK-NEXT: br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[WHILE_COND_I14_I:%.*]]
+// CHECK-NEXT: br i1 [[CMP_I]], label [[IF_THEN_I:%.*]], label [[WHILE_COND_I14_I_PREHEADER:%.*]]
+// CHECK: while.cond.i14.i.preheader:
+// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[P]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I17_I5:%.*]] = icmp eq i8 [[TMP1]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I17_I5]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT:%.*]], label [[WHILE_BODY_I18_I:%.*]]
// CHECK: if.then.i:
// CHECK-NEXT: [[INCDEC_PTR_I:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 1
-// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA4]]
-// CHECK-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I:%.*]] [
-// CHECK-NEXT: i8 120, label [[WHILE_COND_I30_I_PREHEADER:%.*]]
-// CHECK-NEXT: i8 88, label [[WHILE_COND_I30_I_PREHEADER]]
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: switch i8 [[TMP2]], label [[WHILE_COND_I_I_PREHEADER:%.*]] [
+// CHECK-NEXT: i8 120, label [[IF_THEN5_I:%.*]]
+// CHECK-NEXT: i8 88, label [[IF_THEN5_I]]
// CHECK-NEXT: ]
-// CHECK: while.cond.i30.i.preheader:
-// CHECK-NEXT: br label [[WHILE_COND_I30_I:%.*]]
-// CHECK: while.cond.i30.i:
-// CHECK-NEXT: [[__TAGP_ADDR_0_I31_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I37_I:%.*]], [[CLEANUP_I36_I:%.*]] ], [ [[INCDEC_PTR_I]], [[WHILE_COND_I30_I_PREHEADER]] ]
-// CHECK-NEXT: [[__R_0_I32_I:%.*]] = phi i64 [ [[__R_2_I_I:%.*]], [[CLEANUP_I36_I]] ], [ 0, [[WHILE_COND_I30_I_PREHEADER]] ]
-// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I31_I]], align 1, !tbaa [[TBAA4]]
-// CHECK-NEXT: [[CMP_NOT_I33_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// CHECK-NEXT: br i1 [[CMP_NOT_I33_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT:%.*]], label [[WHILE_BODY_I34_I:%.*]]
-// CHECK: while.body.i34.i:
-// CHECK-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// CHECK-NEXT: [[OR_COND_I35_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// CHECK-NEXT: br i1 [[OR_COND_I35_I]], label [[IF_END31_I_I:%.*]], label [[IF_ELSE_I_I:%.*]]
+// CHECK: while.cond.i.i.preheader:
+// CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I_I14:%.*]] = icmp eq i8 [[TMP3]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I_I14]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I_I:%.*]]
+// CHECK: if.then5.i:
+// CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I30_I9:%.*]] = icmp eq i8 [[TMP4]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I30_I9]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I31_I:%.*]]
+// CHECK: while.body.i31.i:
+// CHECK-NEXT: [[TMP5:%.*]] = phi i8 [ [[TMP9:%.*]], [[IF_END31_I_I:%.*]] ], [ [[TMP4]], [[IF_THEN5_I]] ]
+// CHECK-NEXT: [[__R_0_I29_I11:%.*]] = phi i64 [ [[ADD28_I_I:%.*]], [[IF_END31_I_I]] ], [ 0, [[IF_THEN5_I]] ]
+// CHECK-NEXT: [[__TAGP_ADDR_0_I28_I10:%.*]] = phi ptr [ [[INCDEC_PTR_I34_I:%.*]], [[IF_END31_I_I]] ], [ [[INCDEC_PTR_I]], [[IF_THEN5_I]] ]
+// CHECK-NEXT: [[TMP6:%.*]] = add i8 [[TMP5]], -48
+// CHECK-NEXT: [[OR_COND_I32_I:%.*]] = icmp ult i8 [[TMP6]], 10
+// CHECK-NEXT: br i1 [[OR_COND_I32_I]], label [[IF_END31_I_I]], label [[IF_ELSE_I_I:%.*]]
// CHECK: if.else.i.i:
-// CHECK-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// CHECK-NEXT: [[OR_COND33_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// CHECK-NEXT: [[TMP7:%.*]] = add i8 [[TMP5]], -97
+// CHECK-NEXT: [[OR_COND33_I_I:%.*]] = icmp ult i8 [[TMP7]], 6
// CHECK-NEXT: br i1 [[OR_COND33_I_I]], label [[IF_END31_I_I]], label [[IF_ELSE17_I_I:%.*]]
// CHECK: if.else17.i.i:
-// CHECK-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// CHECK-NEXT: [[OR_COND34_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// CHECK-NEXT: br i1 [[OR_COND34_I_I]], label [[IF_END31_I_I]], label [[CLEANUP_I36_I]]
+// CHECK-NEXT: [[TMP8:%.*]] = add i8 [[TMP5]], -65
+// CHECK-NEXT: [[OR_COND34_I_I:%.*]] = icmp ult i8 [[TMP8]], 6
+// CHECK-NEXT: br i1 [[OR_COND34_I_I]], label [[IF_END31_I_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]]
// CHECK: if.end31.i.i:
-// CHECK-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I34_I]] ], [ -87, [[IF_ELSE_I_I]] ], [ -55, [[IF_ELSE17_I_I]] ]
-// CHECK-NEXT: [[MUL24_I_I:%.*]] = shl i64 [[__R_0_I32_I]], 4
-// CHECK-NEXT: [[CONV25_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// CHECK-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I31_I]] ], [ -87, [[IF_ELSE_I_I]] ], [ -55, [[IF_ELSE17_I_I]] ]
+// CHECK-NEXT: [[MUL24_I_I:%.*]] = shl i64 [[__R_0_I29_I11]], 4
+// CHECK-NEXT: [[CONV25_I_I:%.*]] = zext nneg i8 [[TMP5]] to i64
// CHECK-NEXT: [[ADD26_I_I:%.*]] = add i64 [[MUL24_I_I]], [[DOTSINK]]
-// CHECK-NEXT: [[ADD28_I_I:%.*]] = add i64 [[ADD26_I_I]], [[CONV25_I_I]]
-// CHECK-NEXT: [[INCDEC_PTR_I40_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I31_I]], i64 1
-// CHECK-NEXT: br label [[CLEANUP_I36_I]]
-// CHECK: cleanup.i36.i:
-// CHECK-NEXT: [[__TAGP_ADDR_1_I37_I]] = phi ptr [ [[INCDEC_PTR_I40_I]], [[IF_END31_I_I]] ], [ [[__TAGP_ADDR_0_I31_I]], [[IF_ELSE17_I_I]] ]
-// CHECK-NEXT: [[__R_2_I_I]] = phi i64 [ [[ADD28_I_I]], [[IF_END31_I_I]] ], [ [[__R_0_I32_I]], [[IF_ELSE17_I_I]] ]
-// CHECK-NEXT: [[COND_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I]] ], [ false, [[IF_ELSE17_I_I]] ]
-// CHECK-NEXT: br i1 [[COND_I_I]], label [[WHILE_COND_I30_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], !llvm.loop [[LOOP11]]
-// CHECK: while.cond.i.i:
-// CHECK-NEXT: [[__TAGP_ADDR_0_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I_I:%.*]], [[CLEANUP_I_I:%.*]] ], [ [[INCDEC_PTR_I]], [[IF_THEN_I]] ]
-// CHECK-NEXT: [[__R_0_I_I:%.*]] = phi i64 [ [[__R_1_I_I:%.*]], [[CLEANUP_I_I]] ], [ 0, [[IF_THEN_I]] ]
-// CHECK-NEXT: [[TMP6:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I_I]], align 1, !tbaa [[TBAA4]]
-// CHECK-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
-// CHECK-NEXT: br i1 [[CMP_NOT_I_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I_I:%.*]]
+// CHECK-NEXT: [[ADD28_I_I]] = add i64 [[ADD26_I_I]], [[CONV25_I_I]]
+// CHECK-NEXT: [[INCDEC_PTR_I34_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I28_I10]], i64 1
+// CHECK-NEXT: [[TMP9]] = load i8, ptr [[INCDEC_PTR_I34_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I30_I:%.*]] = icmp eq i8 [[TMP9]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I30_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I31_I]], !llvm.loop [[LOOP11]]
// CHECK: while.body.i.i:
-// CHECK-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// CHECK-NEXT: [[OR_COND_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
-// CHECK-NEXT: br i1 [[OR_COND_I_I]], label [[IF_THEN_I_I:%.*]], label [[CLEANUP_I_I]]
+// CHECK-NEXT: [[TMP10:%.*]] = phi i8 [ [[TMP12:%.*]], [[IF_THEN_I_I:%.*]] ], [ [[TMP3]], [[WHILE_COND_I_I_PREHEADER]] ]
+// CHECK-NEXT: [[__R_0_I_I16:%.*]] = phi i64 [ [[SUB_I_I:%.*]], [[IF_THEN_I_I]] ], [ 0, [[WHILE_COND_I_I_PREHEADER]] ]
+// CHECK-NEXT: [[__TAGP_ADDR_0_I_I15:%.*]] = phi ptr [ [[INCDEC_PTR_I_I:%.*]], [[IF_THEN_I_I]] ], [ [[INCDEC_PTR_I]], [[WHILE_COND_I_I_PREHEADER]] ]
+// CHECK-NEXT: [[TMP11:%.*]] = and i8 [[TMP10]], -8
+// CHECK-NEXT: [[OR_COND_I_I:%.*]] = icmp eq i8 [[TMP11]], 48
+// CHECK-NEXT: br i1 [[OR_COND_I_I]], label [[IF_THEN_I_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]]
// CHECK: if.then.i.i:
-// CHECK-NEXT: [[MUL_I_I:%.*]] = shl i64 [[__R_0_I_I]], 3
-// CHECK-NEXT: [[CONV5_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// CHECK-NEXT: [[MUL_I_I:%.*]] = shl i64 [[__R_0_I_I16]], 3
+// CHECK-NEXT: [[CONV5_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// CHECK-NEXT: [[ADD_I_I:%.*]] = add i64 [[MUL_I_I]], -48
-// CHECK-NEXT: [[SUB_I_I:%.*]] = add i64 [[ADD_I_I]], [[CONV5_I_I]]
-// CHECK-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I]], i64 1
-// CHECK-NEXT: br label [[CLEANUP_I_I]]
-// CHECK: cleanup.i.i:
-// CHECK-NEXT: [[__TAGP_ADDR_1_I_I]] = phi ptr [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ], [ [[__TAGP_ADDR_0_I_I]], [[WHILE_BODY_I_I]] ]
-// CHECK-NEXT: [[__R_1_I_I]] = phi i64 [ [[SUB_I_I]], [[IF_THEN_I_I]] ], [ [[__R_0_I_I]], [[WHILE_BODY_I_I]] ]
-// CHECK-NEXT: br i1 [[OR_COND_I_I]], label [[WHILE_COND_I_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], !llvm.loop [[LOOP7]]
-// CHECK: while.cond.i14.i:
-// CHECK-NEXT: [[__TAGP_ADDR_0_I15_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I21_I:%.*]], [[CLEANUP_I20_I:%.*]] ], [ [[P]], [[ENTRY:%.*]] ]
-// CHECK-NEXT: [[__R_0_I16_I:%.*]] = phi i64 [ [[__R_1_I22_I:%.*]], [[CLEANUP_I20_I]] ], [ 0, [[ENTRY]] ]
-// CHECK-NEXT: [[TMP8:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I15_I]], align 1, !tbaa [[TBAA4]]
-// CHECK-NEXT: [[CMP_NOT_I17_I:%.*]] = icmp eq i8 [[TMP8]], 0
-// CHECK-NEXT: br i1 [[CMP_NOT_I17_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I18_I:%.*]]
+// CHECK-NEXT: [[SUB_I_I]] = add i64 [[ADD_I_I]], [[CONV5_I_I]]
+// CHECK-NEXT: [[INCDEC_PTR_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I15]], i64 1
+// CHECK-NEXT: [[TMP12]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i8 [[TMP12]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I_I]], !llvm.loop [[LOOP7]]
// CHECK: while.body.i18.i:
-// CHECK-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// CHECK-NEXT: [[OR_COND_I19_I:%.*]] = icmp ult i8 [[TMP9]], 10
-// CHECK-NEXT: br i1 [[OR_COND_I19_I]], label [[IF_THEN_I24_I:%.*]], label [[CLEANUP_I20_I]]
-// CHECK: if.then.i24.i:
-// CHECK-NEXT: [[MUL_I25_I:%.*]] = mul i64 [[__R_0_I16_I]], 10
-// CHECK-NEXT: [[CONV5_I26_I:%.*]] = zext nneg i8 [[TMP8]] to i64
-// CHECK-NEXT: [[ADD_I27_I:%.*]] = add i64 [[MUL_I25_I]], -48
-// CHECK-NEXT: [[SUB_I28_I:%.*]] = add i64 [[ADD_I27_I]], [[CONV5_I26_I]]
-// CHECK-NEXT: [[INCDEC_PTR_I29_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I]], i64 1
-// CHECK-NEXT: br label [[CLEANUP_I20_I]]
-// CHECK: cleanup.i20.i:
-// CHECK-NEXT: [[__TAGP_ADDR_1_I21_I]] = phi ptr [ [[INCDEC_PTR_I29_I]], [[IF_THEN_I24_I]] ], [ [[__TAGP_ADDR_0_I15_I]], [[WHILE_BODY_I18_I]] ]
-// CHECK-NEXT: [[__R_1_I22_I]] = phi i64 [ [[SUB_I28_I]], [[IF_THEN_I24_I]] ], [ [[__R_0_I16_I]], [[WHILE_BODY_I18_I]] ]
-// CHECK-NEXT: br i1 [[OR_COND_I19_I]], label [[WHILE_COND_I14_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], !llvm.loop [[LOOP10]]
+// CHECK-NEXT: [[TMP13:%.*]] = phi i8 [ [[TMP15:%.*]], [[IF_THEN_I21_I:%.*]] ], [ [[TMP1]], [[WHILE_COND_I14_I_PREHEADER]] ]
+// CHECK-NEXT: [[__R_0_I16_I7:%.*]] = phi i64 [ [[SUB_I25_I:%.*]], [[IF_THEN_I21_I]] ], [ 0, [[WHILE_COND_I14_I_PREHEADER]] ]
+// CHECK-NEXT: [[__TAGP_ADDR_0_I15_I6:%.*]] = phi ptr [ [[INCDEC_PTR_I26_I:%.*]], [[IF_THEN_I21_I]] ], [ [[P]], [[WHILE_COND_I14_I_PREHEADER]] ]
+// CHECK-NEXT: [[TMP14:%.*]] = add i8 [[TMP13]], -48
+// CHECK-NEXT: [[OR_COND_I19_I:%.*]] = icmp ult i8 [[TMP14]], 10
+// CHECK-NEXT: br i1 [[OR_COND_I19_I]], label [[IF_THEN_I21_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]]
+// CHECK: if.then.i21.i:
+// CHECK-NEXT: [[MUL_I22_I:%.*]] = mul i64 [[__R_0_I16_I7]], 10
+// CHECK-NEXT: [[CONV5_I23_I:%.*]] = zext nneg i8 [[TMP13]] to i64
+// CHECK-NEXT: [[ADD_I24_I:%.*]] = add i64 [[MUL_I22_I]], -48
+// CHECK-NEXT: [[SUB_I25_I]] = add i64 [[ADD_I24_I]], [[CONV5_I23_I]]
+// CHECK-NEXT: [[INCDEC_PTR_I26_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I6]], i64 1
+// CHECK-NEXT: [[TMP15]] = load i8, ptr [[INCDEC_PTR_I26_I]], align 1, !tbaa [[TBAA4]]
+// CHECK-NEXT: [[CMP_NOT_I17_I:%.*]] = icmp eq i8 [[TMP15]], 0
+// CHECK-NEXT: br i1 [[CMP_NOT_I17_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I18_I]], !llvm.loop [[LOOP10]]
// CHECK: _ZL15__make_mantissaPKc.exit:
-// CHECK-NEXT: [[RETVAL_0_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I]] ], [ [[__R_0_I_I]], [[WHILE_COND_I_I]] ], [ 0, [[CLEANUP_I36_I]] ], [ [[__R_0_I32_I]], [[WHILE_COND_I30_I]] ], [ 0, [[CLEANUP_I20_I]] ], [ [[__R_0_I16_I]], [[WHILE_COND_I14_I]] ]
+// CHECK-NEXT: [[RETVAL_0_I:%.*]] = phi i64 [ 0, [[WHILE_COND_I_I_PREHEADER]] ], [ 0, [[IF_THEN5_I]] ], [ 0, [[WHILE_COND_I14_I_PREHEADER]] ], [ [[SUB_I_I]], [[IF_THEN_I_I]] ], [ 0, [[WHILE_BODY_I_I]] ], [ [[ADD28_I_I]], [[IF_END31_I_I]] ], [ 0, [[IF_ELSE17_I_I]] ], [ [[SUB_I25_I]], [[IF_THEN_I21_I]] ], [ 0, [[WHILE_BODY_I18_I]] ]
// CHECK-NEXT: ret i64 [[RETVAL_0_I]]
//
// AMDGCNSPIRV-LABEL: @test___make_mantissa(
@@ -339,53 +319,49 @@ extern "C" __device__ uint64_t test___make_mantissa_base16(const char *p) {
// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[P]], i64 1
// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA5]]
// AMDGCNSPIRV-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I:%.*]] [
-// AMDGCNSPIRV-NEXT: i8 120, label [[WHILE_COND_I28_I_PREHEADER:%.*]]
-// AMDGCNSPIRV-NEXT: i8 88, label [[WHILE_COND_I28_I_PREHEADER]]
+// AMDGCNSPIRV-NEXT: i8 120, label [[IF_THEN5_I:%.*]]
+// AMDGCNSPIRV-NEXT: i8 88, label [[IF_THEN5_I]]
// AMDGCNSPIRV-NEXT: ]
-// AMDGCNSPIRV: while.cond.i28.i.preheader:
-// AMDGCNSPIRV-NEXT: br label [[WHILE_COND_I28_I:%.*]]
-// AMDGCNSPIRV: while.cond.i28.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I29_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I34_I:%.*]], [[CLEANUP_I_I:%.*]] ], [ [[INCDEC_PTR_I]], [[WHILE_COND_I28_I_PREHEADER]] ]
-// AMDGCNSPIRV-NEXT: [[__R_0_I30_I:%.*]] = phi i64 [ [[__R_2_I_I:%.*]], [[CLEANUP_I_I]] ], [ 0, [[WHILE_COND_I28_I_PREHEADER]] ]
-// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT:%.*]], label [[WHILE_BODY_I32_I:%.*]]
+// AMDGCNSPIRV: if.then5.i:
+// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I5:%.*]] = icmp eq i8 [[TMP2]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I5]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT:%.*]], label [[WHILE_BODY_I32_I:%.*]]
// AMDGCNSPIRV: while.body.i32.i:
-// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// AMDGCNSPIRV-NEXT: [[OR_COND_I33_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I33_I]], label [[IF_END31_I_I:%.*]], label [[IF_ELSE_I_I:%.*]]
+// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = phi i8 [ [[TMP7:%.*]], [[IF_END31_I_I:%.*]] ], [ [[TMP2]], [[IF_THEN5_I]] ]
+// AMDGCNSPIRV-NEXT: [[__R_0_I30_I7:%.*]] = phi i64 [ [[ADD28_I_I:%.*]], [[IF_END31_I_I]] ], [ 0, [[IF_THEN5_I]] ]
+// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I29_I6:%.*]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I36_I:%.*]], [[IF_END31_I_I]] ], [ [[INCDEC_PTR_I]], [[IF_THEN5_I]] ]
+// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = add i8 [[TMP3]], -48
+// AMDGCNSPIRV-NEXT: [[OR_COND_I33_I:%.*]] = icmp ult i8 [[TMP4]], 10
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I33_I]], label [[IF_END31_I_I]], label [[IF_ELSE_I_I:%.*]]
// AMDGCNSPIRV: if.else.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// AMDGCNSPIRV-NEXT: [[OR_COND33_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = add i8 [[TMP3]], -97
+// AMDGCNSPIRV-NEXT: [[OR_COND33_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
// AMDGCNSPIRV-NEXT: br i1 [[OR_COND33_I_I]], label [[IF_END31_I_I]], label [[IF_ELSE17_I_I:%.*]]
// AMDGCNSPIRV: if.else17.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// AMDGCNSPIRV-NEXT: [[OR_COND34_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I_I]], label [[IF_END31_I_I]], label [[CLEANUP_I_I]]
+// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = add i8 [[TMP3]], -65
+// AMDGCNSPIRV-NEXT: [[OR_COND34_I_I:%.*]] = icmp ult i8 [[TMP6]], 6
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I_I]], label [[IF_END31_I_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]]
// AMDGCNSPIRV: if.end31.i.i:
// AMDGCNSPIRV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I32_I]] ], [ -87, [[IF_ELSE_I_I]] ], [ -55, [[IF_ELSE17_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[MUL24_I_I:%.*]] = shl i64 [[__R_0_I30_I]], 4
-// AMDGCNSPIRV-NEXT: [[CONV25_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// AMDGCNSPIRV-NEXT: [[MUL24_I_I:%.*]] = shl i64 [[__R_0_I30_I7]], 4
+// AMDGCNSPIRV-NEXT: [[CONV25_I_I:%.*]] = zext nneg i8 [[TMP3]] to i64
// AMDGCNSPIRV-NEXT: [[ADD26_I_I:%.*]] = add i64 [[MUL24_I_I]], [[DOTSINK]]
-// AMDGCNSPIRV-NEXT: [[ADD28_I_I:%.*]] = add i64 [[ADD26_I_I]], [[CONV25_I_I]]
-// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I37_I:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I]], i64 1
-// AMDGCNSPIRV-NEXT: br label [[CLEANUP_I_I]]
-// AMDGCNSPIRV: cleanup.i.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I34_I]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I37_I]], [[IF_END31_I_I]] ], [ [[__TAGP_ADDR_0_I29_I]], [[IF_ELSE17_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[__R_2_I_I]] = phi i64 [ [[ADD28_I_I]], [[IF_END31_I_I]] ], [ [[__R_0_I30_I]], [[IF_ELSE17_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[COND_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I]] ], [ false, [[IF_ELSE17_I_I]] ]
-// AMDGCNSPIRV-NEXT: br i1 [[COND_I_I]], label [[WHILE_COND_I28_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], !llvm.loop [[LOOP12]]
+// AMDGCNSPIRV-NEXT: [[ADD28_I_I]] = add i64 [[ADD26_I_I]], [[CONV25_I_I]]
+// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I36_I]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I6]], i64 1
+// AMDGCNSPIRV-NEXT: [[TMP7]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I36_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I:%.*]] = icmp eq i8 [[TMP7]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I32_I]], !llvm.loop [[LOOP12]]
// AMDGCNSPIRV: while.cond.i.i:
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I_I:%.*]], [[WHILE_BODY_I_I:%.*]] ], [ [[INCDEC_PTR_I]], [[IF_THEN_I]] ]
// AMDGCNSPIRV-NEXT: [[__R_0_I_I:%.*]] = phi i64 [ [[__R_1_I_I:%.*]], [[WHILE_BODY_I_I]] ], [ 0, [[IF_THEN_I]] ]
-// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
+// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I_I]]
// AMDGCNSPIRV: while.body.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// AMDGCNSPIRV-NEXT: [[OR_COND_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
+// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = and i8 [[TMP8]], -8
+// AMDGCNSPIRV-NEXT: [[OR_COND_I_I:%.*]] = icmp eq i8 [[TMP9]], 48
// AMDGCNSPIRV-NEXT: [[MUL_I_I:%.*]] = shl i64 [[__R_0_I_I]], 3
-// AMDGCNSPIRV-NEXT: [[CONV5_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// AMDGCNSPIRV-NEXT: [[CONV5_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
// AMDGCNSPIRV-NEXT: [[ADD_I_I:%.*]] = add i64 [[MUL_I_I]], -48
// AMDGCNSPIRV-NEXT: [[SUB_I_I:%.*]] = add i64 [[ADD_I_I]], [[CONV5_I_I]]
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I_I_IDX:%.*]] = zext i1 [[OR_COND_I_I]] to i64
@@ -395,14 +371,14 @@ extern "C" __device__ uint64_t test___make_mantissa_base16(const char *p) {
// AMDGCNSPIRV: while.cond.i14.i:
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I15_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I25_I:%.*]], [[WHILE_BODY_I18_I:%.*]] ], [ [[P]], [[ENTRY:%.*]] ]
// AMDGCNSPIRV-NEXT: [[__R_0_I16_I:%.*]] = phi i64 [ [[__R_1_I26_I:%.*]], [[WHILE_BODY_I18_I]] ], [ 0, [[ENTRY]] ]
-// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I15_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I17_I:%.*]] = icmp eq i8 [[TMP8]], 0
+// AMDGCNSPIRV-NEXT: [[TMP10:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I15_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I17_I:%.*]] = icmp eq i8 [[TMP10]], 0
// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I17_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], label [[WHILE_BODY_I18_I]]
// AMDGCNSPIRV: while.body.i18.i:
-// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// AMDGCNSPIRV-NEXT: [[OR_COND_I19_I:%.*]] = icmp ult i8 [[TMP9]], 10
+// AMDGCNSPIRV-NEXT: [[TMP11:%.*]] = add i8 [[TMP10]], -48
+// AMDGCNSPIRV-NEXT: [[OR_COND_I19_I:%.*]] = icmp ult i8 [[TMP11]], 10
// AMDGCNSPIRV-NEXT: [[MUL_I20_I:%.*]] = mul i64 [[__R_0_I16_I]], 10
-// AMDGCNSPIRV-NEXT: [[CONV5_I21_I:%.*]] = zext nneg i8 [[TMP8]] to i64
+// AMDGCNSPIRV-NEXT: [[CONV5_I21_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// AMDGCNSPIRV-NEXT: [[ADD_I22_I:%.*]] = add i64 [[MUL_I20_I]], -48
// AMDGCNSPIRV-NEXT: [[SUB_I23_I:%.*]] = add i64 [[ADD_I22_I]], [[CONV5_I21_I]]
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I25_I_IDX:%.*]] = zext i1 [[OR_COND_I19_I]] to i64
@@ -410,7 +386,7 @@ extern "C" __device__ uint64_t test___make_mantissa_base16(const char *p) {
// AMDGCNSPIRV-NEXT: [[__R_1_I26_I]] = select i1 [[OR_COND_I19_I]], i64 [[SUB_I23_I]], i64 [[__R_0_I16_I]]
// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I19_I]], label [[WHILE_COND_I14_I]], label [[_ZL15__MAKE_MANTISSAPKC_EXIT]], !llvm.loop [[LOOP11]]
// AMDGCNSPIRV: _ZL15__make_mantissaPKc.exit:
-// AMDGCNSPIRV-NEXT: [[RETVAL_0_I:%.*]] = phi i64 [ 0, [[WHILE_BODY_I_I]] ], [ [[__R_0_I_I]], [[WHILE_COND_I_I]] ], [ 0, [[CLEANUP_I_I]] ], [ [[__R_0_I30_I]], [[WHILE_COND_I28_I]] ], [ 0, [[WHILE_BODY_I18_I]] ], [ [[__R_0_I16_I]], [[WHILE_COND_I14_I]] ]
+// AMDGCNSPIRV-NEXT: [[RETVAL_0_I:%.*]] = phi i64 [ 0, [[IF_THEN5_I]] ], [ 0, [[WHILE_BODY_I_I]] ], [ [[__R_0_I_I]], [[WHILE_COND_I_I]] ], [ [[ADD28_I_I]], [[IF_END31_I_I]] ], [ 0, [[IF_ELSE17_I_I]] ], [ 0, [[WHILE_BODY_I18_I]] ], [ [[__R_0_I16_I]], [[WHILE_COND_I14_I]] ]
// AMDGCNSPIRV-NEXT: ret i64 [[RETVAL_0_I]]
//
extern "C" __device__ uint64_t test___make_mantissa(const char *p) {
@@ -3771,96 +3747,90 @@ extern "C" __device__ double test_modf(double x, double* y) {
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[TMP0:%.*]] = load i8, ptr [[TAG:%.*]], align 1, !tbaa [[TBAA4]]
// DEFAULT-NEXT: [[CMP_I_I:%.*]] = icmp eq i8 [[TMP0]], 48
-// DEFAULT-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I:%.*]]
+// DEFAULT-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I_PREHEADER:%.*]]
+// DEFAULT: while.cond.i14.i.i.preheader:
+// DEFAULT-NEXT: [[TMP1:%.*]] = load i8, ptr [[TAG]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I17_I_I5:%.*]] = icmp eq i8 [[TMP1]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I17_I_I5]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I18_I_I:%.*]]
// DEFAULT: if.then.i.i:
// DEFAULT-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[TAG]], i64 1
-// DEFAULT-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// DEFAULT-NEXT: i8 120, label [[WHILE_COND_I30_I_I_PREHEADER:%.*]]
-// DEFAULT-NEXT: i8 88, label [[WHILE_COND_I30_I_I_PREHEADER]]
+// DEFAULT-NEXT: [[TMP2:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: switch i8 [[TMP2]], label [[WHILE_COND_I_I_I_PREHEADER:%.*]] [
+// DEFAULT-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// DEFAULT-NEXT: i8 88, label [[IF_THEN5_I_I]]
// DEFAULT-NEXT: ]
-// DEFAULT: while.cond.i30.i.i.preheader:
-// DEFAULT-NEXT: br label [[WHILE_COND_I30_I_I:%.*]]
-// DEFAULT: while.cond.i30.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_0_I31_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I37_I_I:%.*]], [[CLEANUP_I36_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// DEFAULT-NEXT: [[__R_0_I32_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I36_I_I]] ], [ 0, [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// DEFAULT-NEXT: [[TMP2:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I31_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: [[CMP_NOT_I33_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// DEFAULT-NEXT: br i1 [[CMP_NOT_I33_I_I]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I34_I_I:%.*]]
-// DEFAULT: while.body.i34.i.i:
-// DEFAULT-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// DEFAULT-NEXT: [[OR_COND_I35_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// DEFAULT-NEXT: br i1 [[OR_COND_I35_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// DEFAULT: while.cond.i.i.i.preheader:
+// DEFAULT-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I_I_I14:%.*]] = icmp eq i8 [[TMP3]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I_I_I14]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// DEFAULT: if.then5.i.i:
+// DEFAULT-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I30_I_I9:%.*]] = icmp eq i8 [[TMP4]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I30_I_I9]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I31_I_I:%.*]]
+// DEFAULT: while.body.i31.i.i:
+// DEFAULT-NEXT: [[TMP5:%.*]] = phi i8 [ [[TMP9:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP4]], [[IF_THEN5_I_I]] ]
+// DEFAULT-NEXT: [[__R_0_I29_I_I11:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// DEFAULT-NEXT: [[__TAGP_ADDR_0_I28_I_I10:%.*]] = phi ptr [ [[INCDEC_PTR_I34_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// DEFAULT-NEXT: [[TMP6:%.*]] = add i8 [[TMP5]], -48
+// DEFAULT-NEXT: [[OR_COND_I32_I_I:%.*]] = icmp ult i8 [[TMP6]], 10
+// DEFAULT-NEXT: br i1 [[OR_COND_I32_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// DEFAULT: if.else.i.i.i:
-// DEFAULT-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// DEFAULT-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// DEFAULT-NEXT: [[TMP7:%.*]] = add i8 [[TMP5]], -97
+// DEFAULT-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP7]], 6
// DEFAULT-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// DEFAULT: if.else17.i.i.i:
-// DEFAULT-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// DEFAULT-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// DEFAULT-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I36_I_I]]
+// DEFAULT-NEXT: [[TMP8:%.*]] = add i8 [[TMP5]], -65
+// DEFAULT-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP8]], 6
+// DEFAULT-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL4NANFPKC_EXIT]]
// DEFAULT: if.end31.i.i.i:
-// DEFAULT-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I34_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I32_I_I]], 4
-// DEFAULT-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// DEFAULT-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I31_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
+// DEFAULT-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I29_I_I11]], 4
+// DEFAULT-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP5]] to i64
// DEFAULT-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// DEFAULT-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// DEFAULT-NEXT: [[INCDEC_PTR_I40_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I31_I_I]], i64 1
-// DEFAULT-NEXT: br label [[CLEANUP_I36_I_I]]
-// DEFAULT: cleanup.i36.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_1_I37_I_I]] = phi ptr [ [[INCDEC_PTR_I40_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I31_I_I]], [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I32_I_I]], [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I30_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP11]]
-// DEFAULT: while.cond.i.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
-// DEFAULT-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// DEFAULT-NEXT: [[TMP6:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
-// DEFAULT-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// DEFAULT-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// DEFAULT-NEXT: [[INCDEC_PTR_I34_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I28_I_I10]], i64 1
+// DEFAULT-NEXT: [[TMP9]] = load i8, ptr [[INCDEC_PTR_I34_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I30_I_I:%.*]] = icmp eq i8 [[TMP9]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I30_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I31_I_I]], !llvm.loop [[LOOP11]]
// DEFAULT: while.body.i.i.i:
-// DEFAULT-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// DEFAULT-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
-// DEFAULT-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[CLEANUP_I_I_I]]
+// DEFAULT-NEXT: [[TMP10:%.*]] = phi i8 [ [[TMP12:%.*]], [[IF_THEN_I_I_I:%.*]] ], [ [[TMP3]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__R_0_I_I_I16:%.*]] = phi i64 [ [[SUB_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__TAGP_ADDR_0_I_I_I15:%.*]] = phi ptr [ [[INCDEC_PTR_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[TMP11:%.*]] = and i8 [[TMP10]], -8
+// DEFAULT-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP11]], 48
+// DEFAULT-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I]], label [[_ZL4NANFPKC_EXIT]]
// DEFAULT: if.then.i.i.i:
-// DEFAULT-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// DEFAULT-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// DEFAULT-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I16]], 3
+// DEFAULT-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// DEFAULT-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
-// DEFAULT-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
-// DEFAULT-NEXT: [[INCDEC_PTR_I_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I]], i64 1
-// DEFAULT-NEXT: br label [[CLEANUP_I_I_I]]
-// DEFAULT: cleanup.i.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_1_I_I_I]] = phi ptr [ [[INCDEC_PTR_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__TAGP_ADDR_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// DEFAULT-NEXT: [[__R_1_I_I_I]] = phi i64 [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// DEFAULT-NEXT: br i1 [[OR_COND_I_I_I]], label [[WHILE_COND_I_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP7]]
-// DEFAULT: while.cond.i14.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I21_I_I:%.*]], [[CLEANUP_I20_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
-// DEFAULT-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I22_I_I:%.*]], [[CLEANUP_I20_I_I]] ], [ 0, [[ENTRY]] ]
-// DEFAULT-NEXT: [[TMP8:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
-// DEFAULT-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I18_I_I:%.*]]
+// DEFAULT-NEXT: [[SUB_I_I_I]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
+// DEFAULT-NEXT: [[INCDEC_PTR_I_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I15]], i64 1
+// DEFAULT-NEXT: [[TMP12]] = load i8, ptr [[INCDEC_PTR_I_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP12]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I]], !llvm.loop [[LOOP7]]
// DEFAULT: while.body.i18.i.i:
-// DEFAULT-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// DEFAULT-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
-// DEFAULT-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I24_I_I:%.*]], label [[CLEANUP_I20_I_I]]
-// DEFAULT: if.then.i24.i.i:
-// DEFAULT-NEXT: [[MUL_I25_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// DEFAULT-NEXT: [[CONV5_I26_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
-// DEFAULT-NEXT: [[ADD_I27_I_I:%.*]] = add i64 [[MUL_I25_I_I]], -48
-// DEFAULT-NEXT: [[SUB_I28_I_I:%.*]] = add i64 [[ADD_I27_I_I]], [[CONV5_I26_I_I]]
-// DEFAULT-NEXT: [[INCDEC_PTR_I29_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I]], i64 1
-// DEFAULT-NEXT: br label [[CLEANUP_I20_I_I]]
-// DEFAULT: cleanup.i20.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_1_I21_I_I]] = phi ptr [ [[INCDEC_PTR_I29_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__TAGP_ADDR_0_I15_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// DEFAULT-NEXT: [[__R_1_I22_I_I]] = phi i64 [ [[SUB_I28_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// DEFAULT-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP10]]
+// DEFAULT-NEXT: [[TMP13:%.*]] = phi i8 [ [[TMP15:%.*]], [[IF_THEN_I21_I_I:%.*]] ], [ [[TMP1]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__R_0_I16_I_I7:%.*]] = phi i64 [ [[SUB_I25_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__TAGP_ADDR_0_I15_I_I6:%.*]] = phi ptr [ [[INCDEC_PTR_I26_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ [[TAG]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[TMP14:%.*]] = add i8 [[TMP13]], -48
+// DEFAULT-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP14]], 10
+// DEFAULT-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I21_I_I]], label [[_ZL4NANFPKC_EXIT]]
+// DEFAULT: if.then.i21.i.i:
+// DEFAULT-NEXT: [[MUL_I22_I_I:%.*]] = mul i64 [[__R_0_I16_I_I7]], 10
+// DEFAULT-NEXT: [[CONV5_I23_I_I:%.*]] = zext nneg i8 [[TMP13]] to i64
+// DEFAULT-NEXT: [[ADD_I24_I_I:%.*]] = add i64 [[MUL_I22_I_I]], -48
+// DEFAULT-NEXT: [[SUB_I25_I_I]] = add i64 [[ADD_I24_I_I]], [[CONV5_I23_I_I]]
+// DEFAULT-NEXT: [[INCDEC_PTR_I26_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I6]], i64 1
+// DEFAULT-NEXT: [[TMP15]] = load i8, ptr [[INCDEC_PTR_I26_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP15]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I18_I_I]], !llvm.loop [[LOOP10]]
// DEFAULT: _ZL4nanfPKc.exit:
-// DEFAULT-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// DEFAULT-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ], [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ], [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ [[SUB_I25_I_I]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ]
// DEFAULT-NEXT: [[CONV_I:%.*]] = trunc i64 [[RETVAL_0_I_I]] to i32
// DEFAULT-NEXT: [[BF_VALUE_I:%.*]] = and i32 [[CONV_I]], 4194303
// DEFAULT-NEXT: [[BF_SET9_I:%.*]] = or disjoint i32 [[BF_VALUE_I]], 2143289344
-// DEFAULT-NEXT: [[TMP10:%.*]] = bitcast i32 [[BF_SET9_I]] to float
-// DEFAULT-NEXT: ret float [[TMP10]]
+// DEFAULT-NEXT: [[TMP16:%.*]] = bitcast i32 [[BF_SET9_I]] to float
+// DEFAULT-NEXT: ret float [[TMP16]]
//
// FINITEONLY-LABEL: @test_nanf(
// FINITEONLY-NEXT: entry:
@@ -3870,191 +3840,179 @@ extern "C" __device__ double test_modf(double x, double* y) {
// APPROX-NEXT: entry:
// APPROX-NEXT: [[TMP0:%.*]] = load i8, ptr [[TAG:%.*]], align 1, !tbaa [[TBAA4]]
// APPROX-NEXT: [[CMP_I_I:%.*]] = icmp eq i8 [[TMP0]], 48
-// APPROX-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I:%.*]]
+// APPROX-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I_PREHEADER:%.*]]
+// APPROX: while.cond.i14.i.i.preheader:
+// APPROX-NEXT: [[TMP1:%.*]] = load i8, ptr [[TAG]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I17_I_I5:%.*]] = icmp eq i8 [[TMP1]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I17_I_I5]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I18_I_I:%.*]]
// APPROX: if.then.i.i:
// APPROX-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[TAG]], i64 1
-// APPROX-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// APPROX-NEXT: i8 120, label [[WHILE_COND_I30_I_I_PREHEADER:%.*]]
-// APPROX-NEXT: i8 88, label [[WHILE_COND_I30_I_I_PREHEADER]]
+// APPROX-NEXT: [[TMP2:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: switch i8 [[TMP2]], label [[WHILE_COND_I_I_I_PREHEADER:%.*]] [
+// APPROX-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// APPROX-NEXT: i8 88, label [[IF_THEN5_I_I]]
// APPROX-NEXT: ]
-// APPROX: while.cond.i30.i.i.preheader:
-// APPROX-NEXT: br label [[WHILE_COND_I30_I_I:%.*]]
-// APPROX: while.cond.i30.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_0_I31_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I37_I_I:%.*]], [[CLEANUP_I36_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// APPROX-NEXT: [[__R_0_I32_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I36_I_I]] ], [ 0, [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// APPROX-NEXT: [[TMP2:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I31_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: [[CMP_NOT_I33_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// APPROX-NEXT: br i1 [[CMP_NOT_I33_I_I]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I34_I_I:%.*]]
-// APPROX: while.body.i34.i.i:
-// APPROX-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// APPROX-NEXT: [[OR_COND_I35_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// APPROX-NEXT: br i1 [[OR_COND_I35_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// APPROX: while.cond.i.i.i.preheader:
+// APPROX-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I_I_I14:%.*]] = icmp eq i8 [[TMP3]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I_I_I14]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// APPROX: if.then5.i.i:
+// APPROX-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I30_I_I9:%.*]] = icmp eq i8 [[TMP4]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I30_I_I9]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I31_I_I:%.*]]
+// APPROX: while.body.i31.i.i:
+// APPROX-NEXT: [[TMP5:%.*]] = phi i8 [ [[TMP9:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP4]], [[IF_THEN5_I_I]] ]
+// APPROX-NEXT: [[__R_0_I29_I_I11:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// APPROX-NEXT: [[__TAGP_ADDR_0_I28_I_I10:%.*]] = phi ptr [ [[INCDEC_PTR_I34_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// APPROX-NEXT: [[TMP6:%.*]] = add i8 [[TMP5]], -48
+// APPROX-NEXT: [[OR_COND_I32_I_I:%.*]] = icmp ult i8 [[TMP6]], 10
+// APPROX-NEXT: br i1 [[OR_COND_I32_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// APPROX: if.else.i.i.i:
-// APPROX-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// APPROX-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// APPROX-NEXT: [[TMP7:%.*]] = add i8 [[TMP5]], -97
+// APPROX-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP7]], 6
// APPROX-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// APPROX: if.else17.i.i.i:
-// APPROX-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// APPROX-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// APPROX-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I36_I_I]]
+// APPROX-NEXT: [[TMP8:%.*]] = add i8 [[TMP5]], -65
+// APPROX-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP8]], 6
+// APPROX-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL4NANFPKC_EXIT]]
// APPROX: if.end31.i.i.i:
-// APPROX-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I34_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I32_I_I]], 4
-// APPROX-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// APPROX-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I31_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
+// APPROX-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I29_I_I11]], 4
+// APPROX-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP5]] to i64
// APPROX-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// APPROX-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// APPROX-NEXT: [[INCDEC_PTR_I40_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I31_I_I]], i64 1
-// APPROX-NEXT: br label [[CLEANUP_I36_I_I]]
-// APPROX: cleanup.i36.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_1_I37_I_I]] = phi ptr [ [[INCDEC_PTR_I40_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I31_I_I]], [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I32_I_I]], [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I30_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP11]]
-// APPROX: while.cond.i.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
-// APPROX-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// APPROX-NEXT: [[TMP6:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
-// APPROX-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// APPROX-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// APPROX-NEXT: [[INCDEC_PTR_I34_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I28_I_I10]], i64 1
+// APPROX-NEXT: [[TMP9]] = load i8, ptr [[INCDEC_PTR_I34_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I30_I_I:%.*]] = icmp eq i8 [[TMP9]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I30_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I31_I_I]], !llvm.loop [[LOOP11]]
// APPROX: while.body.i.i.i:
-// APPROX-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// APPROX-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
-// APPROX-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[CLEANUP_I_I_I]]
+// APPROX-NEXT: [[TMP10:%.*]] = phi i8 [ [[TMP12:%.*]], [[IF_THEN_I_I_I:%.*]] ], [ [[TMP3]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__R_0_I_I_I16:%.*]] = phi i64 [ [[SUB_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__TAGP_ADDR_0_I_I_I15:%.*]] = phi ptr [ [[INCDEC_PTR_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[TMP11:%.*]] = and i8 [[TMP10]], -8
+// APPROX-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP11]], 48
+// APPROX-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I]], label [[_ZL4NANFPKC_EXIT]]
// APPROX: if.then.i.i.i:
-// APPROX-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// APPROX-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// APPROX-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I16]], 3
+// APPROX-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// APPROX-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
-// APPROX-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
-// APPROX-NEXT: [[INCDEC_PTR_I_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I]], i64 1
-// APPROX-NEXT: br label [[CLEANUP_I_I_I]]
-// APPROX: cleanup.i.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_1_I_I_I]] = phi ptr [ [[INCDEC_PTR_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__TAGP_ADDR_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// APPROX-NEXT: [[__R_1_I_I_I]] = phi i64 [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// APPROX-NEXT: br i1 [[OR_COND_I_I_I]], label [[WHILE_COND_I_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP7]]
-// APPROX: while.cond.i14.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I21_I_I:%.*]], [[CLEANUP_I20_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
-// APPROX-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I22_I_I:%.*]], [[CLEANUP_I20_I_I]] ], [ 0, [[ENTRY]] ]
-// APPROX-NEXT: [[TMP8:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
-// APPROX-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I18_I_I:%.*]]
+// APPROX-NEXT: [[SUB_I_I_I]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
+// APPROX-NEXT: [[INCDEC_PTR_I_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I15]], i64 1
+// APPROX-NEXT: [[TMP12]] = load i8, ptr [[INCDEC_PTR_I_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP12]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I]], !llvm.loop [[LOOP7]]
// APPROX: while.body.i18.i.i:
-// APPROX-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// APPROX-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
-// APPROX-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I24_I_I:%.*]], label [[CLEANUP_I20_I_I]]
-// APPROX: if.then.i24.i.i:
-// APPROX-NEXT: [[MUL_I25_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// APPROX-NEXT: [[CONV5_I26_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
-// APPROX-NEXT: [[ADD_I27_I_I:%.*]] = add i64 [[MUL_I25_I_I]], -48
-// APPROX-NEXT: [[SUB_I28_I_I:%.*]] = add i64 [[ADD_I27_I_I]], [[CONV5_I26_I_I]]
-// APPROX-NEXT: [[INCDEC_PTR_I29_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I]], i64 1
-// APPROX-NEXT: br label [[CLEANUP_I20_I_I]]
-// APPROX: cleanup.i20.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_1_I21_I_I]] = phi ptr [ [[INCDEC_PTR_I29_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__TAGP_ADDR_0_I15_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// APPROX-NEXT: [[__R_1_I22_I_I]] = phi i64 [ [[SUB_I28_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// APPROX-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP10]]
+// APPROX-NEXT: [[TMP13:%.*]] = phi i8 [ [[TMP15:%.*]], [[IF_THEN_I21_I_I:%.*]] ], [ [[TMP1]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__R_0_I16_I_I7:%.*]] = phi i64 [ [[SUB_I25_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__TAGP_ADDR_0_I15_I_I6:%.*]] = phi ptr [ [[INCDEC_PTR_I26_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ [[TAG]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[TMP14:%.*]] = add i8 [[TMP13]], -48
+// APPROX-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP14]], 10
+// APPROX-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I21_I_I]], label [[_ZL4NANFPKC_EXIT]]
+// APPROX: if.then.i21.i.i:
+// APPROX-NEXT: [[MUL_I22_I_I:%.*]] = mul i64 [[__R_0_I16_I_I7]], 10
+// APPROX-NEXT: [[CONV5_I23_I_I:%.*]] = zext nneg i8 [[TMP13]] to i64
+// APPROX-NEXT: [[ADD_I24_I_I:%.*]] = add i64 [[MUL_I22_I_I]], -48
+// APPROX-NEXT: [[SUB_I25_I_I]] = add i64 [[ADD_I24_I_I]], [[CONV5_I23_I_I]]
+// APPROX-NEXT: [[INCDEC_PTR_I26_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I6]], i64 1
+// APPROX-NEXT: [[TMP15]] = load i8, ptr [[INCDEC_PTR_I26_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP15]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I18_I_I]], !llvm.loop [[LOOP10]]
// APPROX: _ZL4nanfPKc.exit:
-// APPROX-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// APPROX-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ], [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ], [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ [[SUB_I25_I_I]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ]
// APPROX-NEXT: [[CONV_I:%.*]] = trunc i64 [[RETVAL_0_I_I]] to i32
// APPROX-NEXT: [[BF_VALUE_I:%.*]] = and i32 [[CONV_I]], 4194303
// APPROX-NEXT: [[BF_SET9_I:%.*]] = or disjoint i32 [[BF_VALUE_I]], 2143289344
-// APPROX-NEXT: [[TMP10:%.*]] = bitcast i32 [[BF_SET9_I]] to float
-// APPROX-NEXT: ret float [[TMP10]]
+// APPROX-NEXT: [[TMP16:%.*]] = bitcast i32 [[BF_SET9_I]] to float
+// APPROX-NEXT: ret float [[TMP16]]
//
// NCRDIV-LABEL: @test_nanf(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[TMP0:%.*]] = load i8, ptr [[TAG:%.*]], align 1, !tbaa [[TBAA4]]
// NCRDIV-NEXT: [[CMP_I_I:%.*]] = icmp eq i8 [[TMP0]], 48
-// NCRDIV-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I:%.*]]
+// NCRDIV-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I_PREHEADER:%.*]]
+// NCRDIV: while.cond.i14.i.i.preheader:
+// NCRDIV-NEXT: [[TMP1:%.*]] = load i8, ptr [[TAG]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I17_I_I5:%.*]] = icmp eq i8 [[TMP1]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I17_I_I5]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I18_I_I:%.*]]
// NCRDIV: if.then.i.i:
// NCRDIV-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[TAG]], i64 1
-// NCRDIV-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// NCRDIV-NEXT: i8 120, label [[WHILE_COND_I30_I_I_PREHEADER:%.*]]
-// NCRDIV-NEXT: i8 88, label [[WHILE_COND_I30_I_I_PREHEADER]]
+// NCRDIV-NEXT: [[TMP2:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: switch i8 [[TMP2]], label [[WHILE_COND_I_I_I_PREHEADER:%.*]] [
+// NCRDIV-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// NCRDIV-NEXT: i8 88, label [[IF_THEN5_I_I]]
// NCRDIV-NEXT: ]
-// NCRDIV: while.cond.i30.i.i.preheader:
-// NCRDIV-NEXT: br label [[WHILE_COND_I30_I_I:%.*]]
-// NCRDIV: while.cond.i30.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_0_I31_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I37_I_I:%.*]], [[CLEANUP_I36_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// NCRDIV-NEXT: [[__R_0_I32_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I36_I_I]] ], [ 0, [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// NCRDIV-NEXT: [[TMP2:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I31_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: [[CMP_NOT_I33_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// NCRDIV-NEXT: br i1 [[CMP_NOT_I33_I_I]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I34_I_I:%.*]]
-// NCRDIV: while.body.i34.i.i:
-// NCRDIV-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// NCRDIV-NEXT: [[OR_COND_I35_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// NCRDIV-NEXT: br i1 [[OR_COND_I35_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// NCRDIV: while.cond.i.i.i.preheader:
+// NCRDIV-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I_I_I14:%.*]] = icmp eq i8 [[TMP3]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I_I_I14]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// NCRDIV: if.then5.i.i:
+// NCRDIV-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I30_I_I9:%.*]] = icmp eq i8 [[TMP4]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I30_I_I9]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I31_I_I:%.*]]
+// NCRDIV: while.body.i31.i.i:
+// NCRDIV-NEXT: [[TMP5:%.*]] = phi i8 [ [[TMP9:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP4]], [[IF_THEN5_I_I]] ]
+// NCRDIV-NEXT: [[__R_0_I29_I_I11:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// NCRDIV-NEXT: [[__TAGP_ADDR_0_I28_I_I10:%.*]] = phi ptr [ [[INCDEC_PTR_I34_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// NCRDIV-NEXT: [[TMP6:%.*]] = add i8 [[TMP5]], -48
+// NCRDIV-NEXT: [[OR_COND_I32_I_I:%.*]] = icmp ult i8 [[TMP6]], 10
+// NCRDIV-NEXT: br i1 [[OR_COND_I32_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// NCRDIV: if.else.i.i.i:
-// NCRDIV-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// NCRDIV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// NCRDIV-NEXT: [[TMP7:%.*]] = add i8 [[TMP5]], -97
+// NCRDIV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP7]], 6
// NCRDIV-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// NCRDIV: if.else17.i.i.i:
-// NCRDIV-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// NCRDIV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// NCRDIV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I36_I_I]]
+// NCRDIV-NEXT: [[TMP8:%.*]] = add i8 [[TMP5]], -65
+// NCRDIV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP8]], 6
+// NCRDIV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL4NANFPKC_EXIT]]
// NCRDIV: if.end31.i.i.i:
-// NCRDIV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I34_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I32_I_I]], 4
-// NCRDIV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// NCRDIV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I31_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
+// NCRDIV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I29_I_I11]], 4
+// NCRDIV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP5]] to i64
// NCRDIV-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// NCRDIV-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// NCRDIV-NEXT: [[INCDEC_PTR_I40_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I31_I_I]], i64 1
-// NCRDIV-NEXT: br label [[CLEANUP_I36_I_I]]
-// NCRDIV: cleanup.i36.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_1_I37_I_I]] = phi ptr [ [[INCDEC_PTR_I40_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I31_I_I]], [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I32_I_I]], [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I30_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP11]]
-// NCRDIV: while.cond.i.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
-// NCRDIV-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// NCRDIV-NEXT: [[TMP6:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
-// NCRDIV-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// NCRDIV-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// NCRDIV-NEXT: [[INCDEC_PTR_I34_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I28_I_I10]], i64 1
+// NCRDIV-NEXT: [[TMP9]] = load i8, ptr [[INCDEC_PTR_I34_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I30_I_I:%.*]] = icmp eq i8 [[TMP9]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I30_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I31_I_I]], !llvm.loop [[LOOP11]]
// NCRDIV: while.body.i.i.i:
-// NCRDIV-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// NCRDIV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
-// NCRDIV-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[CLEANUP_I_I_I]]
+// NCRDIV-NEXT: [[TMP10:%.*]] = phi i8 [ [[TMP12:%.*]], [[IF_THEN_I_I_I:%.*]] ], [ [[TMP3]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__R_0_I_I_I16:%.*]] = phi i64 [ [[SUB_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__TAGP_ADDR_0_I_I_I15:%.*]] = phi ptr [ [[INCDEC_PTR_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[TMP11:%.*]] = and i8 [[TMP10]], -8
+// NCRDIV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP11]], 48
+// NCRDIV-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I]], label [[_ZL4NANFPKC_EXIT]]
// NCRDIV: if.then.i.i.i:
-// NCRDIV-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// NCRDIV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// NCRDIV-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I16]], 3
+// NCRDIV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// NCRDIV-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
-// NCRDIV-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
-// NCRDIV-NEXT: [[INCDEC_PTR_I_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I]], i64 1
-// NCRDIV-NEXT: br label [[CLEANUP_I_I_I]]
-// NCRDIV: cleanup.i.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_1_I_I_I]] = phi ptr [ [[INCDEC_PTR_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__TAGP_ADDR_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// NCRDIV-NEXT: [[__R_1_I_I_I]] = phi i64 [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// NCRDIV-NEXT: br i1 [[OR_COND_I_I_I]], label [[WHILE_COND_I_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP7]]
-// NCRDIV: while.cond.i14.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I21_I_I:%.*]], [[CLEANUP_I20_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
-// NCRDIV-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I22_I_I:%.*]], [[CLEANUP_I20_I_I]] ], [ 0, [[ENTRY]] ]
-// NCRDIV-NEXT: [[TMP8:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
-// NCRDIV-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I18_I_I:%.*]]
+// NCRDIV-NEXT: [[SUB_I_I_I]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
+// NCRDIV-NEXT: [[INCDEC_PTR_I_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I15]], i64 1
+// NCRDIV-NEXT: [[TMP12]] = load i8, ptr [[INCDEC_PTR_I_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP12]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I]], !llvm.loop [[LOOP7]]
// NCRDIV: while.body.i18.i.i:
-// NCRDIV-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// NCRDIV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
-// NCRDIV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I24_I_I:%.*]], label [[CLEANUP_I20_I_I]]
-// NCRDIV: if.then.i24.i.i:
-// NCRDIV-NEXT: [[MUL_I25_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// NCRDIV-NEXT: [[CONV5_I26_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
-// NCRDIV-NEXT: [[ADD_I27_I_I:%.*]] = add i64 [[MUL_I25_I_I]], -48
-// NCRDIV-NEXT: [[SUB_I28_I_I:%.*]] = add i64 [[ADD_I27_I_I]], [[CONV5_I26_I_I]]
-// NCRDIV-NEXT: [[INCDEC_PTR_I29_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I]], i64 1
-// NCRDIV-NEXT: br label [[CLEANUP_I20_I_I]]
-// NCRDIV: cleanup.i20.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_1_I21_I_I]] = phi ptr [ [[INCDEC_PTR_I29_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__TAGP_ADDR_0_I15_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// NCRDIV-NEXT: [[__R_1_I22_I_I]] = phi i64 [ [[SUB_I28_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// NCRDIV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP10]]
+// NCRDIV-NEXT: [[TMP13:%.*]] = phi i8 [ [[TMP15:%.*]], [[IF_THEN_I21_I_I:%.*]] ], [ [[TMP1]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__R_0_I16_I_I7:%.*]] = phi i64 [ [[SUB_I25_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__TAGP_ADDR_0_I15_I_I6:%.*]] = phi ptr [ [[INCDEC_PTR_I26_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ [[TAG]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[TMP14:%.*]] = add i8 [[TMP13]], -48
+// NCRDIV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP14]], 10
+// NCRDIV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I21_I_I]], label [[_ZL4NANFPKC_EXIT]]
+// NCRDIV: if.then.i21.i.i:
+// NCRDIV-NEXT: [[MUL_I22_I_I:%.*]] = mul i64 [[__R_0_I16_I_I7]], 10
+// NCRDIV-NEXT: [[CONV5_I23_I_I:%.*]] = zext nneg i8 [[TMP13]] to i64
+// NCRDIV-NEXT: [[ADD_I24_I_I:%.*]] = add i64 [[MUL_I22_I_I]], -48
+// NCRDIV-NEXT: [[SUB_I25_I_I]] = add i64 [[ADD_I24_I_I]], [[CONV5_I23_I_I]]
+// NCRDIV-NEXT: [[INCDEC_PTR_I26_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I6]], i64 1
+// NCRDIV-NEXT: [[TMP15]] = load i8, ptr [[INCDEC_PTR_I26_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP15]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I18_I_I]], !llvm.loop [[LOOP10]]
// NCRDIV: _ZL4nanfPKc.exit:
-// NCRDIV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// NCRDIV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ], [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ], [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ [[SUB_I25_I_I]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ]
// NCRDIV-NEXT: [[CONV_I:%.*]] = trunc i64 [[RETVAL_0_I_I]] to i32
// NCRDIV-NEXT: [[BF_VALUE_I:%.*]] = and i32 [[CONV_I]], 4194303
// NCRDIV-NEXT: [[BF_SET9_I:%.*]] = or disjoint i32 [[BF_VALUE_I]], 2143289344
-// NCRDIV-NEXT: [[TMP10:%.*]] = bitcast i32 [[BF_SET9_I]] to float
-// NCRDIV-NEXT: ret float [[TMP10]]
+// NCRDIV-NEXT: [[TMP16:%.*]] = bitcast i32 [[BF_SET9_I]] to float
+// NCRDIV-NEXT: ret float [[TMP16]]
//
// AMDGCNSPIRV-LABEL: @test_nanf(
// AMDGCNSPIRV-NEXT: entry:
@@ -4065,53 +4023,49 @@ extern "C" __device__ double test_modf(double x, double* y) {
// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TAG]], i64 1
// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA5]]
// AMDGCNSPIRV-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// AMDGCNSPIRV-NEXT: i8 120, label [[WHILE_COND_I28_I_I_PREHEADER:%.*]]
-// AMDGCNSPIRV-NEXT: i8 88, label [[WHILE_COND_I28_I_I_PREHEADER]]
+// AMDGCNSPIRV-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// AMDGCNSPIRV-NEXT: i8 88, label [[IF_THEN5_I_I]]
// AMDGCNSPIRV-NEXT: ]
-// AMDGCNSPIRV: while.cond.i28.i.i.preheader:
-// AMDGCNSPIRV-NEXT: br label [[WHILE_COND_I28_I_I:%.*]]
-// AMDGCNSPIRV: while.cond.i28.i.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I29_I_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I34_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I28_I_I_PREHEADER]] ]
-// AMDGCNSPIRV-NEXT: [[__R_0_I30_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[WHILE_COND_I28_I_I_PREHEADER]] ]
-// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I_I]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I32_I_I:%.*]]
+// AMDGCNSPIRV: if.then5.i.i:
+// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I_I5:%.*]] = icmp eq i8 [[TMP2]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I_I5]], label [[_ZL4NANFPKC_EXIT:%.*]], label [[WHILE_BODY_I32_I_I:%.*]]
// AMDGCNSPIRV: while.body.i32.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// AMDGCNSPIRV-NEXT: [[OR_COND_I33_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I33_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = phi i8 [ [[TMP7:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP2]], [[IF_THEN5_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[__R_0_I30_I_I7:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I29_I_I6:%.*]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I36_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = add i8 [[TMP3]], -48
+// AMDGCNSPIRV-NEXT: [[OR_COND_I33_I_I:%.*]] = icmp ult i8 [[TMP4]], 10
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I33_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// AMDGCNSPIRV: if.else.i.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// AMDGCNSPIRV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = add i8 [[TMP3]], -97
+// AMDGCNSPIRV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
// AMDGCNSPIRV-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// AMDGCNSPIRV: if.else17.i.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// AMDGCNSPIRV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I_I_I]]
+// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = add i8 [[TMP3]], -65
+// AMDGCNSPIRV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP6]], 6
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL4NANFPKC_EXIT]]
// AMDGCNSPIRV: if.end31.i.i.i:
// AMDGCNSPIRV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I32_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I30_I_I]], 4
-// AMDGCNSPIRV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// AMDGCNSPIRV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I30_I_I7]], 4
+// AMDGCNSPIRV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP3]] to i64
// AMDGCNSPIRV-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// AMDGCNSPIRV-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I37_I_I:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I_I]], i64 1
-// AMDGCNSPIRV-NEXT: br label [[CLEANUP_I_I_I]]
-// AMDGCNSPIRV: cleanup.i.i.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I34_I_I]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I37_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I29_I_I]], [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I30_I_I]], [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I28_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP12]]
+// AMDGCNSPIRV-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I36_I_I]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I_I6]], i64 1
+// AMDGCNSPIRV-NEXT: [[TMP7]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I36_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I_I:%.*]] = icmp eq i8 [[TMP7]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I32_I_I]], !llvm.loop [[LOOP12]]
// AMDGCNSPIRV: while.cond.i.i.i:
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[WHILE_BODY_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
// AMDGCNSPIRV-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[WHILE_BODY_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
+// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I_I_I]]
// AMDGCNSPIRV: while.body.i.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// AMDGCNSPIRV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
+// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = and i8 [[TMP8]], -8
+// AMDGCNSPIRV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP9]], 48
// AMDGCNSPIRV-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// AMDGCNSPIRV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// AMDGCNSPIRV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
// AMDGCNSPIRV-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
// AMDGCNSPIRV-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I_I_I_IDX:%.*]] = zext i1 [[OR_COND_I_I_I]] to i64
@@ -4121,14 +4075,14 @@ extern "C" __device__ double test_modf(double x, double* y) {
// AMDGCNSPIRV: while.cond.i14.i.i:
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I25_I_I:%.*]], [[WHILE_BODY_I18_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
// AMDGCNSPIRV-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I26_I_I:%.*]], [[WHILE_BODY_I18_I_I]] ], [ 0, [[ENTRY]] ]
-// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
+// AMDGCNSPIRV-NEXT: [[TMP10:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP10]], 0
// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL4NANFPKC_EXIT]], label [[WHILE_BODY_I18_I_I]]
// AMDGCNSPIRV: while.body.i18.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// AMDGCNSPIRV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
+// AMDGCNSPIRV-NEXT: [[TMP11:%.*]] = add i8 [[TMP10]], -48
+// AMDGCNSPIRV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP11]], 10
// AMDGCNSPIRV-NEXT: [[MUL_I20_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// AMDGCNSPIRV-NEXT: [[CONV5_I21_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
+// AMDGCNSPIRV-NEXT: [[CONV5_I21_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// AMDGCNSPIRV-NEXT: [[ADD_I22_I_I:%.*]] = add i64 [[MUL_I20_I_I]], -48
// AMDGCNSPIRV-NEXT: [[SUB_I23_I_I:%.*]] = add i64 [[ADD_I22_I_I]], [[CONV5_I21_I_I]]
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I25_I_I_IDX:%.*]] = zext i1 [[OR_COND_I19_I_I]] to i64
@@ -4136,12 +4090,12 @@ extern "C" __device__ double test_modf(double x, double* y) {
// AMDGCNSPIRV-NEXT: [[__R_1_I26_I_I]] = select i1 [[OR_COND_I19_I_I]], i64 [[SUB_I23_I_I]], i64 [[__R_0_I16_I_I]]
// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL4NANFPKC_EXIT]], !llvm.loop [[LOOP11]]
// AMDGCNSPIRV: _ZL4nanfPKc.exit:
-// AMDGCNSPIRV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_BODY_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I30_I_I]], [[WHILE_COND_I28_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
// AMDGCNSPIRV-NEXT: [[CONV_I:%.*]] = trunc i64 [[RETVAL_0_I_I]] to i32
// AMDGCNSPIRV-NEXT: [[BF_VALUE_I:%.*]] = and i32 [[CONV_I]], 4194303
// AMDGCNSPIRV-NEXT: [[BF_SET9_I:%.*]] = or disjoint i32 [[BF_VALUE_I]], 2143289344
-// AMDGCNSPIRV-NEXT: [[TMP10:%.*]] = bitcast i32 [[BF_SET9_I]] to float
-// AMDGCNSPIRV-NEXT: ret float [[TMP10]]
+// AMDGCNSPIRV-NEXT: [[TMP12:%.*]] = bitcast i32 [[BF_SET9_I]] to float
+// AMDGCNSPIRV-NEXT: ret float [[TMP12]]
//
extern "C" __device__ float test_nanf(const char *tag) {
return nanf(tag);
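The regenerated checks above all cover the same header-only nan-tag parser: after a "0x"/"0X" prefix (the switch on i8 120/88) the tag is read as hex (the shl-by-4 loop with the -48/-87/-55 sinks for '0'-'9', 'a'-'f', 'A'-'F'), after a bare leading '0' as octal (the `and i8 %c, -8 == 48` test and shl-by-3), and otherwise as decimal (the mul-by-10 loop); the accumulated value is then masked into the mantissa of a quiet NaN. A minimal sketch of that final step for the float case, with the constants taken verbatim from the IR above (4194303 = 0x3FFFFF, 2143289344 = 0x7FC00000); the helper name is hypothetical and this is inferred from the checks, not the header's actual source:

#include <cstdint>
#include <cstring>

// Hypothetical helper mirroring the tail of the checked IR for test_nanf:
// keep the low 22 bits of the parsed tag value as the NaN payload, OR in
// the quiet-NaN bit pattern, and reinterpret the bits as a float.
static float make_quiet_nanf(uint64_t parsed_tag) {
  uint32_t bits = static_cast<uint32_t>(parsed_tag) & 0x3FFFFFu; // and i32 ..., 4194303
  bits |= 0x7FC00000u;                                           // or disjoint ..., 2143289344
  float f;
  std::memcpy(&f, &bits, sizeof f); // bitcast i32 ... to float
  return f;
}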
@@ -4151,95 +4105,89 @@ extern "C" __device__ float test_nanf(const char *tag) {
// DEFAULT-NEXT: entry:
// DEFAULT-NEXT: [[TMP0:%.*]] = load i8, ptr [[TAG:%.*]], align 1, !tbaa [[TBAA4]]
// DEFAULT-NEXT: [[CMP_I_I:%.*]] = icmp eq i8 [[TMP0]], 48
-// DEFAULT-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I:%.*]]
+// DEFAULT-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I_PREHEADER:%.*]]
+// DEFAULT: while.cond.i14.i.i.preheader:
+// DEFAULT-NEXT: [[TMP1:%.*]] = load i8, ptr [[TAG]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I17_I_I5:%.*]] = icmp eq i8 [[TMP1]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I17_I_I5]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I18_I_I:%.*]]
// DEFAULT: if.then.i.i:
// DEFAULT-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[TAG]], i64 1
-// DEFAULT-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// DEFAULT-NEXT: i8 120, label [[WHILE_COND_I30_I_I_PREHEADER:%.*]]
-// DEFAULT-NEXT: i8 88, label [[WHILE_COND_I30_I_I_PREHEADER]]
+// DEFAULT-NEXT: [[TMP2:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: switch i8 [[TMP2]], label [[WHILE_COND_I_I_I_PREHEADER:%.*]] [
+// DEFAULT-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// DEFAULT-NEXT: i8 88, label [[IF_THEN5_I_I]]
// DEFAULT-NEXT: ]
-// DEFAULT: while.cond.i30.i.i.preheader:
-// DEFAULT-NEXT: br label [[WHILE_COND_I30_I_I:%.*]]
-// DEFAULT: while.cond.i30.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_0_I31_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I37_I_I:%.*]], [[CLEANUP_I36_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// DEFAULT-NEXT: [[__R_0_I32_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I36_I_I]] ], [ 0, [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// DEFAULT-NEXT: [[TMP2:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I31_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: [[CMP_NOT_I33_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// DEFAULT-NEXT: br i1 [[CMP_NOT_I33_I_I]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I34_I_I:%.*]]
-// DEFAULT: while.body.i34.i.i:
-// DEFAULT-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// DEFAULT-NEXT: [[OR_COND_I35_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// DEFAULT-NEXT: br i1 [[OR_COND_I35_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// DEFAULT: while.cond.i.i.i.preheader:
+// DEFAULT-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I_I_I14:%.*]] = icmp eq i8 [[TMP3]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I_I_I14]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// DEFAULT: if.then5.i.i:
+// DEFAULT-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I30_I_I9:%.*]] = icmp eq i8 [[TMP4]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I30_I_I9]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I31_I_I:%.*]]
+// DEFAULT: while.body.i31.i.i:
+// DEFAULT-NEXT: [[TMP5:%.*]] = phi i8 [ [[TMP9:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP4]], [[IF_THEN5_I_I]] ]
+// DEFAULT-NEXT: [[__R_0_I29_I_I11:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// DEFAULT-NEXT: [[__TAGP_ADDR_0_I28_I_I10:%.*]] = phi ptr [ [[INCDEC_PTR_I34_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// DEFAULT-NEXT: [[TMP6:%.*]] = add i8 [[TMP5]], -48
+// DEFAULT-NEXT: [[OR_COND_I32_I_I:%.*]] = icmp ult i8 [[TMP6]], 10
+// DEFAULT-NEXT: br i1 [[OR_COND_I32_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// DEFAULT: if.else.i.i.i:
-// DEFAULT-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// DEFAULT-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// DEFAULT-NEXT: [[TMP7:%.*]] = add i8 [[TMP5]], -97
+// DEFAULT-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP7]], 6
// DEFAULT-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// DEFAULT: if.else17.i.i.i:
-// DEFAULT-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// DEFAULT-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// DEFAULT-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I36_I_I]]
+// DEFAULT-NEXT: [[TMP8:%.*]] = add i8 [[TMP5]], -65
+// DEFAULT-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP8]], 6
+// DEFAULT-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL3NANPKC_EXIT]]
// DEFAULT: if.end31.i.i.i:
-// DEFAULT-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I34_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I32_I_I]], 4
-// DEFAULT-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// DEFAULT-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I31_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
+// DEFAULT-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I29_I_I11]], 4
+// DEFAULT-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP5]] to i64
// DEFAULT-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// DEFAULT-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// DEFAULT-NEXT: [[INCDEC_PTR_I40_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I31_I_I]], i64 1
-// DEFAULT-NEXT: br label [[CLEANUP_I36_I_I]]
-// DEFAULT: cleanup.i36.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_1_I37_I_I]] = phi ptr [ [[INCDEC_PTR_I40_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I31_I_I]], [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I32_I_I]], [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// DEFAULT-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I30_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP11]]
-// DEFAULT: while.cond.i.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
-// DEFAULT-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// DEFAULT-NEXT: [[TMP6:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
-// DEFAULT-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// DEFAULT-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// DEFAULT-NEXT: [[INCDEC_PTR_I34_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I28_I_I10]], i64 1
+// DEFAULT-NEXT: [[TMP9]] = load i8, ptr [[INCDEC_PTR_I34_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I30_I_I:%.*]] = icmp eq i8 [[TMP9]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I30_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I31_I_I]], !llvm.loop [[LOOP11]]
// DEFAULT: while.body.i.i.i:
-// DEFAULT-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// DEFAULT-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
-// DEFAULT-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[CLEANUP_I_I_I]]
+// DEFAULT-NEXT: [[TMP10:%.*]] = phi i8 [ [[TMP12:%.*]], [[IF_THEN_I_I_I:%.*]] ], [ [[TMP3]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__R_0_I_I_I16:%.*]] = phi i64 [ [[SUB_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__TAGP_ADDR_0_I_I_I15:%.*]] = phi ptr [ [[INCDEC_PTR_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[TMP11:%.*]] = and i8 [[TMP10]], -8
+// DEFAULT-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP11]], 48
+// DEFAULT-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I]], label [[_ZL3NANPKC_EXIT]]
// DEFAULT: if.then.i.i.i:
-// DEFAULT-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// DEFAULT-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// DEFAULT-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I16]], 3
+// DEFAULT-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// DEFAULT-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
-// DEFAULT-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
-// DEFAULT-NEXT: [[INCDEC_PTR_I_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I]], i64 1
-// DEFAULT-NEXT: br label [[CLEANUP_I_I_I]]
-// DEFAULT: cleanup.i.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_1_I_I_I]] = phi ptr [ [[INCDEC_PTR_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__TAGP_ADDR_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// DEFAULT-NEXT: [[__R_1_I_I_I]] = phi i64 [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// DEFAULT-NEXT: br i1 [[OR_COND_I_I_I]], label [[WHILE_COND_I_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP7]]
-// DEFAULT: while.cond.i14.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I21_I_I:%.*]], [[CLEANUP_I20_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
-// DEFAULT-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I22_I_I:%.*]], [[CLEANUP_I20_I_I]] ], [ 0, [[ENTRY]] ]
-// DEFAULT-NEXT: [[TMP8:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA4]]
-// DEFAULT-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
-// DEFAULT-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I18_I_I:%.*]]
+// DEFAULT-NEXT: [[SUB_I_I_I]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
+// DEFAULT-NEXT: [[INCDEC_PTR_I_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I15]], i64 1
+// DEFAULT-NEXT: [[TMP12]] = load i8, ptr [[INCDEC_PTR_I_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP12]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I]], !llvm.loop [[LOOP7]]
// DEFAULT: while.body.i18.i.i:
-// DEFAULT-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// DEFAULT-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
-// DEFAULT-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I24_I_I:%.*]], label [[CLEANUP_I20_I_I]]
-// DEFAULT: if.then.i24.i.i:
-// DEFAULT-NEXT: [[MUL_I25_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// DEFAULT-NEXT: [[CONV5_I26_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
-// DEFAULT-NEXT: [[ADD_I27_I_I:%.*]] = add i64 [[MUL_I25_I_I]], -48
-// DEFAULT-NEXT: [[SUB_I28_I_I:%.*]] = add i64 [[ADD_I27_I_I]], [[CONV5_I26_I_I]]
-// DEFAULT-NEXT: [[INCDEC_PTR_I29_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I]], i64 1
-// DEFAULT-NEXT: br label [[CLEANUP_I20_I_I]]
-// DEFAULT: cleanup.i20.i.i:
-// DEFAULT-NEXT: [[__TAGP_ADDR_1_I21_I_I]] = phi ptr [ [[INCDEC_PTR_I29_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__TAGP_ADDR_0_I15_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// DEFAULT-NEXT: [[__R_1_I22_I_I]] = phi i64 [ [[SUB_I28_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// DEFAULT-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP10]]
+// DEFAULT-NEXT: [[TMP13:%.*]] = phi i8 [ [[TMP15:%.*]], [[IF_THEN_I21_I_I:%.*]] ], [ [[TMP1]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__R_0_I16_I_I7:%.*]] = phi i64 [ [[SUB_I25_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[__TAGP_ADDR_0_I15_I_I6:%.*]] = phi ptr [ [[INCDEC_PTR_I26_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ [[TAG]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// DEFAULT-NEXT: [[TMP14:%.*]] = add i8 [[TMP13]], -48
+// DEFAULT-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP14]], 10
+// DEFAULT-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I21_I_I]], label [[_ZL3NANPKC_EXIT]]
+// DEFAULT: if.then.i21.i.i:
+// DEFAULT-NEXT: [[MUL_I22_I_I:%.*]] = mul i64 [[__R_0_I16_I_I7]], 10
+// DEFAULT-NEXT: [[CONV5_I23_I_I:%.*]] = zext nneg i8 [[TMP13]] to i64
+// DEFAULT-NEXT: [[ADD_I24_I_I:%.*]] = add i64 [[MUL_I22_I_I]], -48
+// DEFAULT-NEXT: [[SUB_I25_I_I]] = add i64 [[ADD_I24_I_I]], [[CONV5_I23_I_I]]
+// DEFAULT-NEXT: [[INCDEC_PTR_I26_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I6]], i64 1
+// DEFAULT-NEXT: [[TMP15]] = load i8, ptr [[INCDEC_PTR_I26_I_I]], align 1, !tbaa [[TBAA4]]
+// DEFAULT-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP15]], 0
+// DEFAULT-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I18_I_I]], !llvm.loop [[LOOP10]]
// DEFAULT: _ZL3nanPKc.exit:
-// DEFAULT-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// DEFAULT-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ], [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ], [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ [[SUB_I25_I_I]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ]
// DEFAULT-NEXT: [[BF_VALUE_I:%.*]] = and i64 [[RETVAL_0_I_I]], 2251799813685247
// DEFAULT-NEXT: [[BF_SET9_I:%.*]] = or disjoint i64 [[BF_VALUE_I]], 9221120237041090560
-// DEFAULT-NEXT: [[TMP10:%.*]] = bitcast i64 [[BF_SET9_I]] to double
-// DEFAULT-NEXT: ret double [[TMP10]]
+// DEFAULT-NEXT: [[TMP16:%.*]] = bitcast i64 [[BF_SET9_I]] to double
+// DEFAULT-NEXT: ret double [[TMP16]]
//
// FINITEONLY-LABEL: @test_nan(
// FINITEONLY-NEXT: entry:
@@ -4249,189 +4197,177 @@ extern "C" __device__ float test_nanf(const char *tag) {
// APPROX-NEXT: entry:
// APPROX-NEXT: [[TMP0:%.*]] = load i8, ptr [[TAG:%.*]], align 1, !tbaa [[TBAA4]]
// APPROX-NEXT: [[CMP_I_I:%.*]] = icmp eq i8 [[TMP0]], 48
-// APPROX-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I:%.*]]
+// APPROX-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I_PREHEADER:%.*]]
+// APPROX: while.cond.i14.i.i.preheader:
+// APPROX-NEXT: [[TMP1:%.*]] = load i8, ptr [[TAG]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I17_I_I5:%.*]] = icmp eq i8 [[TMP1]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I17_I_I5]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I18_I_I:%.*]]
// APPROX: if.then.i.i:
// APPROX-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[TAG]], i64 1
-// APPROX-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// APPROX-NEXT: i8 120, label [[WHILE_COND_I30_I_I_PREHEADER:%.*]]
-// APPROX-NEXT: i8 88, label [[WHILE_COND_I30_I_I_PREHEADER]]
+// APPROX-NEXT: [[TMP2:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: switch i8 [[TMP2]], label [[WHILE_COND_I_I_I_PREHEADER:%.*]] [
+// APPROX-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// APPROX-NEXT: i8 88, label [[IF_THEN5_I_I]]
// APPROX-NEXT: ]
-// APPROX: while.cond.i30.i.i.preheader:
-// APPROX-NEXT: br label [[WHILE_COND_I30_I_I:%.*]]
-// APPROX: while.cond.i30.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_0_I31_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I37_I_I:%.*]], [[CLEANUP_I36_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// APPROX-NEXT: [[__R_0_I32_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I36_I_I]] ], [ 0, [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// APPROX-NEXT: [[TMP2:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I31_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: [[CMP_NOT_I33_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// APPROX-NEXT: br i1 [[CMP_NOT_I33_I_I]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I34_I_I:%.*]]
-// APPROX: while.body.i34.i.i:
-// APPROX-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// APPROX-NEXT: [[OR_COND_I35_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// APPROX-NEXT: br i1 [[OR_COND_I35_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// APPROX: while.cond.i.i.i.preheader:
+// APPROX-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I_I_I14:%.*]] = icmp eq i8 [[TMP3]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I_I_I14]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// APPROX: if.then5.i.i:
+// APPROX-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I30_I_I9:%.*]] = icmp eq i8 [[TMP4]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I30_I_I9]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I31_I_I:%.*]]
+// APPROX: while.body.i31.i.i:
+// APPROX-NEXT: [[TMP5:%.*]] = phi i8 [ [[TMP9:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP4]], [[IF_THEN5_I_I]] ]
+// APPROX-NEXT: [[__R_0_I29_I_I11:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// APPROX-NEXT: [[__TAGP_ADDR_0_I28_I_I10:%.*]] = phi ptr [ [[INCDEC_PTR_I34_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// APPROX-NEXT: [[TMP6:%.*]] = add i8 [[TMP5]], -48
+// APPROX-NEXT: [[OR_COND_I32_I_I:%.*]] = icmp ult i8 [[TMP6]], 10
+// APPROX-NEXT: br i1 [[OR_COND_I32_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// APPROX: if.else.i.i.i:
-// APPROX-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// APPROX-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// APPROX-NEXT: [[TMP7:%.*]] = add i8 [[TMP5]], -97
+// APPROX-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP7]], 6
// APPROX-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// APPROX: if.else17.i.i.i:
-// APPROX-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// APPROX-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// APPROX-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I36_I_I]]
+// APPROX-NEXT: [[TMP8:%.*]] = add i8 [[TMP5]], -65
+// APPROX-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP8]], 6
+// APPROX-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL3NANPKC_EXIT]]
// APPROX: if.end31.i.i.i:
-// APPROX-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I34_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I32_I_I]], 4
-// APPROX-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// APPROX-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I31_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
+// APPROX-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I29_I_I11]], 4
+// APPROX-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP5]] to i64
// APPROX-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// APPROX-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// APPROX-NEXT: [[INCDEC_PTR_I40_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I31_I_I]], i64 1
-// APPROX-NEXT: br label [[CLEANUP_I36_I_I]]
-// APPROX: cleanup.i36.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_1_I37_I_I]] = phi ptr [ [[INCDEC_PTR_I40_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I31_I_I]], [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I32_I_I]], [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// APPROX-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I30_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP11]]
-// APPROX: while.cond.i.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
-// APPROX-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// APPROX-NEXT: [[TMP6:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
-// APPROX-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// APPROX-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// APPROX-NEXT: [[INCDEC_PTR_I34_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I28_I_I10]], i64 1
+// APPROX-NEXT: [[TMP9]] = load i8, ptr [[INCDEC_PTR_I34_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I30_I_I:%.*]] = icmp eq i8 [[TMP9]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I30_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I31_I_I]], !llvm.loop [[LOOP11]]
// APPROX: while.body.i.i.i:
-// APPROX-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// APPROX-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
-// APPROX-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[CLEANUP_I_I_I]]
+// APPROX-NEXT: [[TMP10:%.*]] = phi i8 [ [[TMP12:%.*]], [[IF_THEN_I_I_I:%.*]] ], [ [[TMP3]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__R_0_I_I_I16:%.*]] = phi i64 [ [[SUB_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__TAGP_ADDR_0_I_I_I15:%.*]] = phi ptr [ [[INCDEC_PTR_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[TMP11:%.*]] = and i8 [[TMP10]], -8
+// APPROX-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP11]], 48
+// APPROX-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I]], label [[_ZL3NANPKC_EXIT]]
// APPROX: if.then.i.i.i:
-// APPROX-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// APPROX-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// APPROX-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I16]], 3
+// APPROX-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// APPROX-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
-// APPROX-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
-// APPROX-NEXT: [[INCDEC_PTR_I_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I]], i64 1
-// APPROX-NEXT: br label [[CLEANUP_I_I_I]]
-// APPROX: cleanup.i.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_1_I_I_I]] = phi ptr [ [[INCDEC_PTR_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__TAGP_ADDR_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// APPROX-NEXT: [[__R_1_I_I_I]] = phi i64 [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// APPROX-NEXT: br i1 [[OR_COND_I_I_I]], label [[WHILE_COND_I_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP7]]
-// APPROX: while.cond.i14.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I21_I_I:%.*]], [[CLEANUP_I20_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
-// APPROX-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I22_I_I:%.*]], [[CLEANUP_I20_I_I]] ], [ 0, [[ENTRY]] ]
-// APPROX-NEXT: [[TMP8:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA4]]
-// APPROX-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
-// APPROX-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I18_I_I:%.*]]
+// APPROX-NEXT: [[SUB_I_I_I]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
+// APPROX-NEXT: [[INCDEC_PTR_I_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I15]], i64 1
+// APPROX-NEXT: [[TMP12]] = load i8, ptr [[INCDEC_PTR_I_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP12]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I]], !llvm.loop [[LOOP7]]
// APPROX: while.body.i18.i.i:
-// APPROX-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// APPROX-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
-// APPROX-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I24_I_I:%.*]], label [[CLEANUP_I20_I_I]]
-// APPROX: if.then.i24.i.i:
-// APPROX-NEXT: [[MUL_I25_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// APPROX-NEXT: [[CONV5_I26_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
-// APPROX-NEXT: [[ADD_I27_I_I:%.*]] = add i64 [[MUL_I25_I_I]], -48
-// APPROX-NEXT: [[SUB_I28_I_I:%.*]] = add i64 [[ADD_I27_I_I]], [[CONV5_I26_I_I]]
-// APPROX-NEXT: [[INCDEC_PTR_I29_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I]], i64 1
-// APPROX-NEXT: br label [[CLEANUP_I20_I_I]]
-// APPROX: cleanup.i20.i.i:
-// APPROX-NEXT: [[__TAGP_ADDR_1_I21_I_I]] = phi ptr [ [[INCDEC_PTR_I29_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__TAGP_ADDR_0_I15_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// APPROX-NEXT: [[__R_1_I22_I_I]] = phi i64 [ [[SUB_I28_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// APPROX-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP10]]
+// APPROX-NEXT: [[TMP13:%.*]] = phi i8 [ [[TMP15:%.*]], [[IF_THEN_I21_I_I:%.*]] ], [ [[TMP1]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__R_0_I16_I_I7:%.*]] = phi i64 [ [[SUB_I25_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[__TAGP_ADDR_0_I15_I_I6:%.*]] = phi ptr [ [[INCDEC_PTR_I26_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ [[TAG]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// APPROX-NEXT: [[TMP14:%.*]] = add i8 [[TMP13]], -48
+// APPROX-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP14]], 10
+// APPROX-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I21_I_I]], label [[_ZL3NANPKC_EXIT]]
+// APPROX: if.then.i21.i.i:
+// APPROX-NEXT: [[MUL_I22_I_I:%.*]] = mul i64 [[__R_0_I16_I_I7]], 10
+// APPROX-NEXT: [[CONV5_I23_I_I:%.*]] = zext nneg i8 [[TMP13]] to i64
+// APPROX-NEXT: [[ADD_I24_I_I:%.*]] = add i64 [[MUL_I22_I_I]], -48
+// APPROX-NEXT: [[SUB_I25_I_I]] = add i64 [[ADD_I24_I_I]], [[CONV5_I23_I_I]]
+// APPROX-NEXT: [[INCDEC_PTR_I26_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I6]], i64 1
+// APPROX-NEXT: [[TMP15]] = load i8, ptr [[INCDEC_PTR_I26_I_I]], align 1, !tbaa [[TBAA4]]
+// APPROX-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP15]], 0
+// APPROX-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I18_I_I]], !llvm.loop [[LOOP10]]
// APPROX: _ZL3nanPKc.exit:
-// APPROX-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// APPROX-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ], [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ], [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ [[SUB_I25_I_I]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ]
// APPROX-NEXT: [[BF_VALUE_I:%.*]] = and i64 [[RETVAL_0_I_I]], 2251799813685247
// APPROX-NEXT: [[BF_SET9_I:%.*]] = or disjoint i64 [[BF_VALUE_I]], 9221120237041090560
-// APPROX-NEXT: [[TMP10:%.*]] = bitcast i64 [[BF_SET9_I]] to double
-// APPROX-NEXT: ret double [[TMP10]]
+// APPROX-NEXT: [[TMP16:%.*]] = bitcast i64 [[BF_SET9_I]] to double
+// APPROX-NEXT: ret double [[TMP16]]
//
// NCRDIV-LABEL: @test_nan(
// NCRDIV-NEXT: entry:
// NCRDIV-NEXT: [[TMP0:%.*]] = load i8, ptr [[TAG:%.*]], align 1, !tbaa [[TBAA4]]
// NCRDIV-NEXT: [[CMP_I_I:%.*]] = icmp eq i8 [[TMP0]], 48
-// NCRDIV-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I:%.*]]
+// NCRDIV-NEXT: br i1 [[CMP_I_I]], label [[IF_THEN_I_I:%.*]], label [[WHILE_COND_I14_I_I_PREHEADER:%.*]]
+// NCRDIV: while.cond.i14.i.i.preheader:
+// NCRDIV-NEXT: [[TMP1:%.*]] = load i8, ptr [[TAG]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I17_I_I5:%.*]] = icmp eq i8 [[TMP1]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I17_I_I5]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I18_I_I:%.*]]
// NCRDIV: if.then.i.i:
// NCRDIV-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[TAG]], i64 1
-// NCRDIV-NEXT: [[TMP1:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// NCRDIV-NEXT: i8 120, label [[WHILE_COND_I30_I_I_PREHEADER:%.*]]
-// NCRDIV-NEXT: i8 88, label [[WHILE_COND_I30_I_I_PREHEADER]]
+// NCRDIV-NEXT: [[TMP2:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: switch i8 [[TMP2]], label [[WHILE_COND_I_I_I_PREHEADER:%.*]] [
+// NCRDIV-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// NCRDIV-NEXT: i8 88, label [[IF_THEN5_I_I]]
// NCRDIV-NEXT: ]
-// NCRDIV: while.cond.i30.i.i.preheader:
-// NCRDIV-NEXT: br label [[WHILE_COND_I30_I_I:%.*]]
-// NCRDIV: while.cond.i30.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_0_I31_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I37_I_I:%.*]], [[CLEANUP_I36_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// NCRDIV-NEXT: [[__R_0_I32_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I36_I_I]] ], [ 0, [[WHILE_COND_I30_I_I_PREHEADER]] ]
-// NCRDIV-NEXT: [[TMP2:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I31_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: [[CMP_NOT_I33_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// NCRDIV-NEXT: br i1 [[CMP_NOT_I33_I_I]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I34_I_I:%.*]]
-// NCRDIV: while.body.i34.i.i:
-// NCRDIV-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// NCRDIV-NEXT: [[OR_COND_I35_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// NCRDIV-NEXT: br i1 [[OR_COND_I35_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// NCRDIV: while.cond.i.i.i.preheader:
+// NCRDIV-NEXT: [[TMP3:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I_I_I14:%.*]] = icmp eq i8 [[TMP3]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I_I_I14]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// NCRDIV: if.then5.i.i:
+// NCRDIV-NEXT: [[TMP4:%.*]] = load i8, ptr [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I30_I_I9:%.*]] = icmp eq i8 [[TMP4]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I30_I_I9]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I31_I_I:%.*]]
+// NCRDIV: while.body.i31.i.i:
+// NCRDIV-NEXT: [[TMP5:%.*]] = phi i8 [ [[TMP9:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP4]], [[IF_THEN5_I_I]] ]
+// NCRDIV-NEXT: [[__R_0_I29_I_I11:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// NCRDIV-NEXT: [[__TAGP_ADDR_0_I28_I_I10:%.*]] = phi ptr [ [[INCDEC_PTR_I34_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// NCRDIV-NEXT: [[TMP6:%.*]] = add i8 [[TMP5]], -48
+// NCRDIV-NEXT: [[OR_COND_I32_I_I:%.*]] = icmp ult i8 [[TMP6]], 10
+// NCRDIV-NEXT: br i1 [[OR_COND_I32_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// NCRDIV: if.else.i.i.i:
-// NCRDIV-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// NCRDIV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// NCRDIV-NEXT: [[TMP7:%.*]] = add i8 [[TMP5]], -97
+// NCRDIV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP7]], 6
// NCRDIV-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// NCRDIV: if.else17.i.i.i:
-// NCRDIV-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// NCRDIV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// NCRDIV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I36_I_I]]
+// NCRDIV-NEXT: [[TMP8:%.*]] = add i8 [[TMP5]], -65
+// NCRDIV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP8]], 6
+// NCRDIV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL3NANPKC_EXIT]]
// NCRDIV: if.end31.i.i.i:
-// NCRDIV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I34_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I32_I_I]], 4
-// NCRDIV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// NCRDIV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I31_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
+// NCRDIV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I29_I_I11]], 4
+// NCRDIV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP5]] to i64
// NCRDIV-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// NCRDIV-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// NCRDIV-NEXT: [[INCDEC_PTR_I40_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I31_I_I]], i64 1
-// NCRDIV-NEXT: br label [[CLEANUP_I36_I_I]]
-// NCRDIV: cleanup.i36.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_1_I37_I_I]] = phi ptr [ [[INCDEC_PTR_I40_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I31_I_I]], [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I32_I_I]], [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// NCRDIV-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I30_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP11]]
-// NCRDIV: while.cond.i.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
-// NCRDIV-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// NCRDIV-NEXT: [[TMP6:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
-// NCRDIV-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I:%.*]]
+// NCRDIV-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// NCRDIV-NEXT: [[INCDEC_PTR_I34_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I28_I_I10]], i64 1
+// NCRDIV-NEXT: [[TMP9]] = load i8, ptr [[INCDEC_PTR_I34_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I30_I_I:%.*]] = icmp eq i8 [[TMP9]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I30_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I31_I_I]], !llvm.loop [[LOOP11]]
// NCRDIV: while.body.i.i.i:
-// NCRDIV-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// NCRDIV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
-// NCRDIV-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I:%.*]], label [[CLEANUP_I_I_I]]
+// NCRDIV-NEXT: [[TMP10:%.*]] = phi i8 [ [[TMP12:%.*]], [[IF_THEN_I_I_I:%.*]] ], [ [[TMP3]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__R_0_I_I_I16:%.*]] = phi i64 [ [[SUB_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__TAGP_ADDR_0_I_I_I15:%.*]] = phi ptr [ [[INCDEC_PTR_I_I_I:%.*]], [[IF_THEN_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[TMP11:%.*]] = and i8 [[TMP10]], -8
+// NCRDIV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP11]], 48
+// NCRDIV-NEXT: br i1 [[OR_COND_I_I_I]], label [[IF_THEN_I_I_I]], label [[_ZL3NANPKC_EXIT]]
// NCRDIV: if.then.i.i.i:
-// NCRDIV-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// NCRDIV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// NCRDIV-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I16]], 3
+// NCRDIV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// NCRDIV-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
-// NCRDIV-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
-// NCRDIV-NEXT: [[INCDEC_PTR_I_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I]], i64 1
-// NCRDIV-NEXT: br label [[CLEANUP_I_I_I]]
-// NCRDIV: cleanup.i.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_1_I_I_I]] = phi ptr [ [[INCDEC_PTR_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__TAGP_ADDR_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// NCRDIV-NEXT: [[__R_1_I_I_I]] = phi i64 [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_BODY_I_I_I]] ]
-// NCRDIV-NEXT: br i1 [[OR_COND_I_I_I]], label [[WHILE_COND_I_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP7]]
-// NCRDIV: while.cond.i14.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr [ [[__TAGP_ADDR_1_I21_I_I:%.*]], [[CLEANUP_I20_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
-// NCRDIV-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I22_I_I:%.*]], [[CLEANUP_I20_I_I]] ], [ 0, [[ENTRY]] ]
-// NCRDIV-NEXT: [[TMP8:%.*]] = load i8, ptr [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA4]]
-// NCRDIV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
-// NCRDIV-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I18_I_I:%.*]]
+// NCRDIV-NEXT: [[SUB_I_I_I]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
+// NCRDIV-NEXT: [[INCDEC_PTR_I_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I_I_I15]], i64 1
+// NCRDIV-NEXT: [[TMP12]] = load i8, ptr [[INCDEC_PTR_I_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP12]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I]], !llvm.loop [[LOOP7]]
// NCRDIV: while.body.i18.i.i:
-// NCRDIV-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// NCRDIV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
-// NCRDIV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I24_I_I:%.*]], label [[CLEANUP_I20_I_I]]
-// NCRDIV: if.then.i24.i.i:
-// NCRDIV-NEXT: [[MUL_I25_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// NCRDIV-NEXT: [[CONV5_I26_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
-// NCRDIV-NEXT: [[ADD_I27_I_I:%.*]] = add i64 [[MUL_I25_I_I]], -48
-// NCRDIV-NEXT: [[SUB_I28_I_I:%.*]] = add i64 [[ADD_I27_I_I]], [[CONV5_I26_I_I]]
-// NCRDIV-NEXT: [[INCDEC_PTR_I29_I_I:%.*]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I]], i64 1
-// NCRDIV-NEXT: br label [[CLEANUP_I20_I_I]]
-// NCRDIV: cleanup.i20.i.i:
-// NCRDIV-NEXT: [[__TAGP_ADDR_1_I21_I_I]] = phi ptr [ [[INCDEC_PTR_I29_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__TAGP_ADDR_0_I15_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// NCRDIV-NEXT: [[__R_1_I22_I_I]] = phi i64 [ [[SUB_I28_I_I]], [[IF_THEN_I24_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_BODY_I18_I_I]] ]
-// NCRDIV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP10]]
+// NCRDIV-NEXT: [[TMP13:%.*]] = phi i8 [ [[TMP15:%.*]], [[IF_THEN_I21_I_I:%.*]] ], [ [[TMP1]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__R_0_I16_I_I7:%.*]] = phi i64 [ [[SUB_I25_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[__TAGP_ADDR_0_I15_I_I6:%.*]] = phi ptr [ [[INCDEC_PTR_I26_I_I:%.*]], [[IF_THEN_I21_I_I]] ], [ [[TAG]], [[WHILE_COND_I14_I_I_PREHEADER]] ]
+// NCRDIV-NEXT: [[TMP14:%.*]] = add i8 [[TMP13]], -48
+// NCRDIV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP14]], 10
+// NCRDIV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[IF_THEN_I21_I_I]], label [[_ZL3NANPKC_EXIT]]
+// NCRDIV: if.then.i21.i.i:
+// NCRDIV-NEXT: [[MUL_I22_I_I:%.*]] = mul i64 [[__R_0_I16_I_I7]], 10
+// NCRDIV-NEXT: [[CONV5_I23_I_I:%.*]] = zext nneg i8 [[TMP13]] to i64
+// NCRDIV-NEXT: [[ADD_I24_I_I:%.*]] = add i64 [[MUL_I22_I_I]], -48
+// NCRDIV-NEXT: [[SUB_I25_I_I]] = add i64 [[ADD_I24_I_I]], [[CONV5_I23_I_I]]
+// NCRDIV-NEXT: [[INCDEC_PTR_I26_I_I]] = getelementptr inbounds nuw i8, ptr [[__TAGP_ADDR_0_I15_I_I6]], i64 1
+// NCRDIV-NEXT: [[TMP15]] = load i8, ptr [[INCDEC_PTR_I26_I_I]], align 1, !tbaa [[TBAA4]]
+// NCRDIV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP15]], 0
+// NCRDIV-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I18_I_I]], !llvm.loop [[LOOP10]]
// NCRDIV: _ZL3nanPKc.exit:
-// NCRDIV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I36_I_I]] ], [ [[__R_0_I32_I_I]], [[WHILE_COND_I30_I_I]] ], [ 0, [[CLEANUP_I20_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// NCRDIV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_COND_I_I_I_PREHEADER]] ], [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_COND_I14_I_I_PREHEADER]] ], [ [[SUB_I_I_I]], [[IF_THEN_I_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ [[SUB_I25_I_I]], [[IF_THEN_I21_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ]
// NCRDIV-NEXT: [[BF_VALUE_I:%.*]] = and i64 [[RETVAL_0_I_I]], 2251799813685247
// NCRDIV-NEXT: [[BF_SET9_I:%.*]] = or disjoint i64 [[BF_VALUE_I]], 9221120237041090560
-// NCRDIV-NEXT: [[TMP10:%.*]] = bitcast i64 [[BF_SET9_I]] to double
-// NCRDIV-NEXT: ret double [[TMP10]]
+// NCRDIV-NEXT: [[TMP16:%.*]] = bitcast i64 [[BF_SET9_I]] to double
+// NCRDIV-NEXT: ret double [[TMP16]]
//
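The double-precision blocks for @test_nan differ only in the payload width: the exit phi is masked with 2251799813685247 (2^51 - 1, the payload bits below the quiet bit) and OR'd with 9221120237041090560 (0x7FF8000000000000, the quiet-NaN exponent/quiet-bit pattern). A matching sketch under the same caveats as above (hypothetical helper, inferred from the checks):

#include <cstdint>
#include <cstring>

// Hypothetical double counterpart: 51 payload bits below the quiet bit.
static double make_quiet_nan(uint64_t parsed_tag) {
  uint64_t bits = parsed_tag & 0x0007FFFFFFFFFFFFull; // and i64 ..., 2251799813685247
  bits |= 0x7FF8000000000000ull;                      // or disjoint ..., 9221120237041090560
  double d;
  std::memcpy(&d, &bits, sizeof d); // bitcast i64 ... to double
  return d;
}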
// AMDGCNSPIRV-LABEL: @test_nan(
// AMDGCNSPIRV-NEXT: entry:
@@ -4442,53 +4378,49 @@ extern "C" __device__ float test_nanf(const char *tag) {
// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I_I:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TAG]], i64 1
// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA5]]
// AMDGCNSPIRV-NEXT: switch i8 [[TMP1]], label [[WHILE_COND_I_I_I:%.*]] [
-// AMDGCNSPIRV-NEXT: i8 120, label [[WHILE_COND_I28_I_I_PREHEADER:%.*]]
-// AMDGCNSPIRV-NEXT: i8 88, label [[WHILE_COND_I28_I_I_PREHEADER]]
+// AMDGCNSPIRV-NEXT: i8 120, label [[IF_THEN5_I_I:%.*]]
+// AMDGCNSPIRV-NEXT: i8 88, label [[IF_THEN5_I_I]]
// AMDGCNSPIRV-NEXT: ]
-// AMDGCNSPIRV: while.cond.i28.i.i.preheader:
-// AMDGCNSPIRV-NEXT: br label [[WHILE_COND_I28_I_I:%.*]]
-// AMDGCNSPIRV: while.cond.i28.i.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I29_I_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I34_I_I:%.*]], [[CLEANUP_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[WHILE_COND_I28_I_I_PREHEADER]] ]
-// AMDGCNSPIRV-NEXT: [[__R_0_I30_I_I:%.*]] = phi i64 [ [[__R_2_I_I_I:%.*]], [[CLEANUP_I_I_I]] ], [ 0, [[WHILE_COND_I28_I_I_PREHEADER]] ]
-// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I_I:%.*]] = icmp eq i8 [[TMP2]], 0
-// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I_I]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I32_I_I:%.*]]
+// AMDGCNSPIRV: if.then5.i.i:
+// AMDGCNSPIRV-NEXT: [[TMP2:%.*]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I_I5:%.*]] = icmp eq i8 [[TMP2]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I_I5]], label [[_ZL3NANPKC_EXIT:%.*]], label [[WHILE_BODY_I32_I_I:%.*]]
// AMDGCNSPIRV: while.body.i32.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = add i8 [[TMP2]], -48
-// AMDGCNSPIRV-NEXT: [[OR_COND_I33_I_I:%.*]] = icmp ult i8 [[TMP3]], 10
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I33_I_I]], label [[IF_END31_I_I_I:%.*]], label [[IF_ELSE_I_I_I:%.*]]
+// AMDGCNSPIRV-NEXT: [[TMP3:%.*]] = phi i8 [ [[TMP7:%.*]], [[IF_END31_I_I_I:%.*]] ], [ [[TMP2]], [[IF_THEN5_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[__R_0_I30_I_I7:%.*]] = phi i64 [ [[ADD28_I_I_I:%.*]], [[IF_END31_I_I_I]] ], [ 0, [[IF_THEN5_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I29_I_I6:%.*]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I36_I_I:%.*]], [[IF_END31_I_I_I]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN5_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = add i8 [[TMP3]], -48
+// AMDGCNSPIRV-NEXT: [[OR_COND_I33_I_I:%.*]] = icmp ult i8 [[TMP4]], 10
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I33_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE_I_I_I:%.*]]
// AMDGCNSPIRV: if.else.i.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP4:%.*]] = add i8 [[TMP2]], -97
-// AMDGCNSPIRV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP4]], 6
+// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = add i8 [[TMP3]], -97
+// AMDGCNSPIRV-NEXT: [[OR_COND33_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
// AMDGCNSPIRV-NEXT: br i1 [[OR_COND33_I_I_I]], label [[IF_END31_I_I_I]], label [[IF_ELSE17_I_I_I:%.*]]
// AMDGCNSPIRV: if.else17.i.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP5:%.*]] = add i8 [[TMP2]], -65
-// AMDGCNSPIRV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP5]], 6
-// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[CLEANUP_I_I_I]]
+// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = add i8 [[TMP3]], -65
+// AMDGCNSPIRV-NEXT: [[OR_COND34_I_I_I:%.*]] = icmp ult i8 [[TMP6]], 6
+// AMDGCNSPIRV-NEXT: br i1 [[OR_COND34_I_I_I]], label [[IF_END31_I_I_I]], label [[_ZL3NANPKC_EXIT]]
// AMDGCNSPIRV: if.end31.i.i.i:
// AMDGCNSPIRV-NEXT: [[DOTSINK:%.*]] = phi i64 [ -48, [[WHILE_BODY_I32_I_I]] ], [ -87, [[IF_ELSE_I_I_I]] ], [ -55, [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I30_I_I]], 4
-// AMDGCNSPIRV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP2]] to i64
+// AMDGCNSPIRV-NEXT: [[MUL24_I_I_I:%.*]] = shl i64 [[__R_0_I30_I_I7]], 4
+// AMDGCNSPIRV-NEXT: [[CONV25_I_I_I:%.*]] = zext nneg i8 [[TMP3]] to i64
// AMDGCNSPIRV-NEXT: [[ADD26_I_I_I:%.*]] = add i64 [[MUL24_I_I_I]], [[DOTSINK]]
-// AMDGCNSPIRV-NEXT: [[ADD28_I_I_I:%.*]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
-// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I37_I_I:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I_I]], i64 1
-// AMDGCNSPIRV-NEXT: br label [[CLEANUP_I_I_I]]
-// AMDGCNSPIRV: cleanup.i.i.i:
-// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I34_I_I]] = phi ptr addrspace(4) [ [[INCDEC_PTR_I37_I_I]], [[IF_END31_I_I_I]] ], [ [[__TAGP_ADDR_0_I29_I_I]], [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[__R_2_I_I_I]] = phi i64 [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ [[__R_0_I30_I_I]], [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[COND_I_I_I:%.*]] = phi i1 [ true, [[IF_END31_I_I_I]] ], [ false, [[IF_ELSE17_I_I_I]] ]
-// AMDGCNSPIRV-NEXT: br i1 [[COND_I_I_I]], label [[WHILE_COND_I28_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP12]]
+// AMDGCNSPIRV-NEXT: [[ADD28_I_I_I]] = add i64 [[ADD26_I_I_I]], [[CONV25_I_I_I]]
+// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I36_I_I]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__TAGP_ADDR_0_I29_I_I6]], i64 1
+// AMDGCNSPIRV-NEXT: [[TMP7]] = load i8, ptr addrspace(4) [[INCDEC_PTR_I36_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I31_I_I:%.*]] = icmp eq i8 [[TMP7]], 0
+// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I31_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I32_I_I]], !llvm.loop [[LOOP12]]
// AMDGCNSPIRV: while.cond.i.i.i:
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I_I_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I_I_I:%.*]], [[WHILE_BODY_I_I_I:%.*]] ], [ [[INCDEC_PTR_I_I]], [[IF_THEN_I_I]] ]
// AMDGCNSPIRV-NEXT: [[__R_0_I_I_I:%.*]] = phi i64 [ [[__R_1_I_I_I:%.*]], [[WHILE_BODY_I_I_I]] ], [ 0, [[IF_THEN_I_I]] ]
-// AMDGCNSPIRV-NEXT: [[TMP6:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP6]], 0
+// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I_I_I]]
// AMDGCNSPIRV: while.body.i.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP7:%.*]] = and i8 [[TMP6]], -8
-// AMDGCNSPIRV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP7]], 48
+// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = and i8 [[TMP8]], -8
+// AMDGCNSPIRV-NEXT: [[OR_COND_I_I_I:%.*]] = icmp eq i8 [[TMP9]], 48
// AMDGCNSPIRV-NEXT: [[MUL_I_I_I:%.*]] = shl i64 [[__R_0_I_I_I]], 3
-// AMDGCNSPIRV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP6]] to i64
+// AMDGCNSPIRV-NEXT: [[CONV5_I_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
// AMDGCNSPIRV-NEXT: [[ADD_I_I_I:%.*]] = add i64 [[MUL_I_I_I]], -48
// AMDGCNSPIRV-NEXT: [[SUB_I_I_I:%.*]] = add i64 [[ADD_I_I_I]], [[CONV5_I_I_I]]
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I_I_I_IDX:%.*]] = zext i1 [[OR_COND_I_I_I]] to i64
@@ -4498,14 +4430,14 @@ extern "C" __device__ float test_nanf(const char *tag) {
// AMDGCNSPIRV: while.cond.i14.i.i:
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_0_I15_I_I:%.*]] = phi ptr addrspace(4) [ [[__TAGP_ADDR_1_I25_I_I:%.*]], [[WHILE_BODY_I18_I_I:%.*]] ], [ [[TAG]], [[ENTRY:%.*]] ]
// AMDGCNSPIRV-NEXT: [[__R_0_I16_I_I:%.*]] = phi i64 [ [[__R_1_I26_I_I:%.*]], [[WHILE_BODY_I18_I_I]] ], [ 0, [[ENTRY]] ]
-// AMDGCNSPIRV-NEXT: [[TMP8:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA5]]
-// AMDGCNSPIRV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP8]], 0
+// AMDGCNSPIRV-NEXT: [[TMP10:%.*]] = load i8, ptr addrspace(4) [[__TAGP_ADDR_0_I15_I_I]], align 1, !tbaa [[TBAA5]]
+// AMDGCNSPIRV-NEXT: [[CMP_NOT_I17_I_I:%.*]] = icmp eq i8 [[TMP10]], 0
// AMDGCNSPIRV-NEXT: br i1 [[CMP_NOT_I17_I_I]], label [[_ZL3NANPKC_EXIT]], label [[WHILE_BODY_I18_I_I]]
// AMDGCNSPIRV: while.body.i18.i.i:
-// AMDGCNSPIRV-NEXT: [[TMP9:%.*]] = add i8 [[TMP8]], -48
-// AMDGCNSPIRV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP9]], 10
+// AMDGCNSPIRV-NEXT: [[TMP11:%.*]] = add i8 [[TMP10]], -48
+// AMDGCNSPIRV-NEXT: [[OR_COND_I19_I_I:%.*]] = icmp ult i8 [[TMP11]], 10
// AMDGCNSPIRV-NEXT: [[MUL_I20_I_I:%.*]] = mul i64 [[__R_0_I16_I_I]], 10
-// AMDGCNSPIRV-NEXT: [[CONV5_I21_I_I:%.*]] = zext nneg i8 [[TMP8]] to i64
+// AMDGCNSPIRV-NEXT: [[CONV5_I21_I_I:%.*]] = zext nneg i8 [[TMP10]] to i64
// AMDGCNSPIRV-NEXT: [[ADD_I22_I_I:%.*]] = add i64 [[MUL_I20_I_I]], -48
// AMDGCNSPIRV-NEXT: [[SUB_I23_I_I:%.*]] = add i64 [[ADD_I22_I_I]], [[CONV5_I21_I_I]]
// AMDGCNSPIRV-NEXT: [[__TAGP_ADDR_1_I25_I_I_IDX:%.*]] = zext i1 [[OR_COND_I19_I_I]] to i64
@@ -4513,11 +4445,11 @@ extern "C" __device__ float test_nanf(const char *tag) {
// AMDGCNSPIRV-NEXT: [[__R_1_I26_I_I]] = select i1 [[OR_COND_I19_I_I]], i64 [[SUB_I23_I_I]], i64 [[__R_0_I16_I_I]]
// AMDGCNSPIRV-NEXT: br i1 [[OR_COND_I19_I_I]], label [[WHILE_COND_I14_I_I]], label [[_ZL3NANPKC_EXIT]], !llvm.loop [[LOOP11]]
// AMDGCNSPIRV: _ZL3nanPKc.exit:
-// AMDGCNSPIRV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[WHILE_BODY_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ 0, [[CLEANUP_I_I_I]] ], [ [[__R_0_I30_I_I]], [[WHILE_COND_I28_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
+// AMDGCNSPIRV-NEXT: [[RETVAL_0_I_I:%.*]] = phi i64 [ 0, [[IF_THEN5_I_I]] ], [ 0, [[WHILE_BODY_I_I_I]] ], [ [[__R_0_I_I_I]], [[WHILE_COND_I_I_I]] ], [ [[ADD28_I_I_I]], [[IF_END31_I_I_I]] ], [ 0, [[IF_ELSE17_I_I_I]] ], [ 0, [[WHILE_BODY_I18_I_I]] ], [ [[__R_0_I16_I_I]], [[WHILE_COND_I14_I_I]] ]
// AMDGCNSPIRV-NEXT: [[BF_VALUE_I:%.*]] = and i64 [[RETVAL_0_I_I]], 2251799813685247
// AMDGCNSPIRV-NEXT: [[BF_SET9_I:%.*]] = or disjoint i64 [[BF_VALUE_I]], 9221120237041090560
-// AMDGCNSPIRV-NEXT: [[TMP10:%.*]] = bitcast i64 [[BF_SET9_I]] to double
-// AMDGCNSPIRV-NEXT: ret double [[TMP10]]
+// AMDGCNSPIRV-NEXT: [[TMP12:%.*]] = bitcast i64 [[BF_SET9_I]] to double
+// AMDGCNSPIRV-NEXT: ret double [[TMP12]]
//
extern "C" __device__ double test_nan(const char *tag) {
return nan(tag);
@@ -4981,11 +4913,13 @@ extern "C" __device__ double test_normcdfinv(double x) {
// DEFAULT-NEXT: [[ADD_I]] = fadd contract float [[__R_0_I4]], [[MUL_I]]
// DEFAULT-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 4
// DEFAULT-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// DEFAULT-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP20:![0-9]+]]
+// DEFAULT-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP20:![0-9]+]]
+// DEFAULT: _ZL5normfiPKf.exit.loopexit:
+// DEFAULT-NEXT: [[TMP1:%.*]] = tail call contract float @llvm.sqrt.f32(float [[ADD_I]])
+// DEFAULT-NEXT: br label [[_ZL5NORMFIPKF_EXIT]]
// DEFAULT: _ZL5normfiPKf.exit:
-// DEFAULT-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// DEFAULT-NEXT: [[TMP1:%.*]] = tail call contract noundef float @llvm.sqrt.f32(float [[__R_0_I_LCSSA]])
-// DEFAULT-NEXT: ret float [[TMP1]]
+// DEFAULT-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL5NORMFIPKF_EXIT_LOOPEXIT]] ]
+// DEFAULT-NEXT: ret float [[__R_0_I_LCSSA]]
//
// FINITEONLY-LABEL: @test_normf(
// FINITEONLY-NEXT: entry:
@@ -5001,11 +4935,13 @@ extern "C" __device__ double test_normcdfinv(double x) {
// FINITEONLY-NEXT: [[ADD_I]] = fadd nnan ninf contract float [[__R_0_I4]], [[MUL_I]]
// FINITEONLY-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 4
// FINITEONLY-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// FINITEONLY-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP20:![0-9]+]]
+// FINITEONLY-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP20:![0-9]+]]
+// FINITEONLY: _ZL5normfiPKf.exit.loopexit:
+// FINITEONLY-NEXT: [[TMP1:%.*]] = tail call nnan ninf contract float @llvm.sqrt.f32(float [[ADD_I]])
+// FINITEONLY-NEXT: br label [[_ZL5NORMFIPKF_EXIT]]
// FINITEONLY: _ZL5normfiPKf.exit:
-// FINITEONLY-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// FINITEONLY-NEXT: [[TMP1:%.*]] = tail call nnan ninf contract noundef float @llvm.sqrt.f32(float [[__R_0_I_LCSSA]])
-// FINITEONLY-NEXT: ret float [[TMP1]]
+// FINITEONLY-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL5NORMFIPKF_EXIT_LOOPEXIT]] ]
+// FINITEONLY-NEXT: ret float [[__R_0_I_LCSSA]]
//
// APPROX-LABEL: @test_normf(
// APPROX-NEXT: entry:
@@ -5021,11 +4957,13 @@ extern "C" __device__ double test_normcdfinv(double x) {
// APPROX-NEXT: [[ADD_I]] = fadd contract float [[__R_0_I4]], [[MUL_I]]
// APPROX-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 4
// APPROX-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// APPROX-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP20:![0-9]+]]
+// APPROX-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP20:![0-9]+]]
+// APPROX: _ZL5normfiPKf.exit.loopexit:
+// APPROX-NEXT: [[TMP1:%.*]] = tail call contract float @llvm.sqrt.f32(float [[ADD_I]])
+// APPROX-NEXT: br label [[_ZL5NORMFIPKF_EXIT]]
// APPROX: _ZL5normfiPKf.exit:
-// APPROX-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// APPROX-NEXT: [[TMP1:%.*]] = tail call contract noundef float @llvm.sqrt.f32(float [[__R_0_I_LCSSA]])
-// APPROX-NEXT: ret float [[TMP1]]
+// APPROX-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL5NORMFIPKF_EXIT_LOOPEXIT]] ]
+// APPROX-NEXT: ret float [[__R_0_I_LCSSA]]
//
// NCRDIV-LABEL: @test_normf(
// NCRDIV-NEXT: entry:
@@ -5041,11 +4979,13 @@ extern "C" __device__ double test_normcdfinv(double x) {
// NCRDIV-NEXT: [[ADD_I]] = fadd contract float [[__R_0_I4]], [[MUL_I]]
// NCRDIV-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 4
// NCRDIV-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// NCRDIV: _ZL5normfiPKf.exit.loopexit:
+// NCRDIV-NEXT: [[TMP1:%.*]] = tail call contract float @llvm.sqrt.f32(float [[ADD_I]])
+// NCRDIV-NEXT: br label [[_ZL5NORMFIPKF_EXIT]]
// NCRDIV: _ZL5normfiPKf.exit:
-// NCRDIV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// NCRDIV-NEXT: [[TMP1:%.*]] = tail call contract noundef float @llvm.sqrt.f32(float [[__R_0_I_LCSSA]]), !fpmath [[META22:![0-9]+]]
-// NCRDIV-NEXT: ret float [[TMP1]]
+// NCRDIV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL5NORMFIPKF_EXIT_LOOPEXIT]] ]
+// NCRDIV-NEXT: ret float [[__R_0_I_LCSSA]]
//
// AMDGCNSPIRV-LABEL: @test_normf(
// AMDGCNSPIRV-NEXT: entry:
@@ -5061,11 +5001,13 @@ extern "C" __device__ double test_normcdfinv(double x) {
// AMDGCNSPIRV-NEXT: [[ADD_I]] = fadd contract float [[__R_0_I4]], [[MUL_I]]
// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__A_ADDR_0_I3]], i64 4
// AMDGCNSPIRV-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// AMDGCNSPIRV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// AMDGCNSPIRV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5NORMFIPKF_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// AMDGCNSPIRV: _ZL5normfiPKf.exit.loopexit:
+// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = tail call contract addrspace(4) float @llvm.sqrt.f32(float [[ADD_I]])
+// AMDGCNSPIRV-NEXT: br label [[_ZL5NORMFIPKF_EXIT]]
// AMDGCNSPIRV: _ZL5normfiPKf.exit:
-// AMDGCNSPIRV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = tail call contract noundef addrspace(4) float @llvm.sqrt.f32(float [[__R_0_I_LCSSA]])
-// AMDGCNSPIRV-NEXT: ret float [[TMP1]]
+// AMDGCNSPIRV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL5NORMFIPKF_EXIT_LOOPEXIT]] ]
+// AMDGCNSPIRV-NEXT: ret float [[__R_0_I_LCSSA]]
//
extern "C" __device__ float test_normf(int x, const float *y) {
return normf(x, y);
@@ -5085,11 +5027,13 @@ extern "C" __device__ float test_normf(int x, const float *y) {
// DEFAULT-NEXT: [[ADD_I]] = fadd contract double [[__R_0_I4]], [[MUL_I]]
// DEFAULT-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 8
// DEFAULT-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// DEFAULT-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// DEFAULT-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// DEFAULT: _ZL4normiPKd.exit.loopexit:
+// DEFAULT-NEXT: [[TMP1:%.*]] = tail call contract double @llvm.sqrt.f64(double [[ADD_I]])
+// DEFAULT-NEXT: br label [[_ZL4NORMIPKD_EXIT]]
// DEFAULT: _ZL4normiPKd.exit:
-// DEFAULT-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// DEFAULT-NEXT: [[TMP1:%.*]] = tail call contract noundef double @llvm.sqrt.f64(double [[__R_0_I_LCSSA]])
-// DEFAULT-NEXT: ret double [[TMP1]]
+// DEFAULT-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL4NORMIPKD_EXIT_LOOPEXIT]] ]
+// DEFAULT-NEXT: ret double [[__R_0_I_LCSSA]]
//
// FINITEONLY-LABEL: @test_norm(
// FINITEONLY-NEXT: entry:
@@ -5105,11 +5049,13 @@ extern "C" __device__ float test_normf(int x, const float *y) {
// FINITEONLY-NEXT: [[ADD_I]] = fadd nnan ninf contract double [[__R_0_I4]], [[MUL_I]]
// FINITEONLY-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 8
// FINITEONLY-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// FINITEONLY-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// FINITEONLY-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// FINITEONLY: _ZL4normiPKd.exit.loopexit:
+// FINITEONLY-NEXT: [[TMP1:%.*]] = tail call nnan ninf contract double @llvm.sqrt.f64(double [[ADD_I]])
+// FINITEONLY-NEXT: br label [[_ZL4NORMIPKD_EXIT]]
// FINITEONLY: _ZL4normiPKd.exit:
-// FINITEONLY-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// FINITEONLY-NEXT: [[TMP1:%.*]] = tail call nnan ninf contract noundef double @llvm.sqrt.f64(double [[__R_0_I_LCSSA]])
-// FINITEONLY-NEXT: ret double [[TMP1]]
+// FINITEONLY-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL4NORMIPKD_EXIT_LOOPEXIT]] ]
+// FINITEONLY-NEXT: ret double [[__R_0_I_LCSSA]]
//
// APPROX-LABEL: @test_norm(
// APPROX-NEXT: entry:
@@ -5125,11 +5071,13 @@ extern "C" __device__ float test_normf(int x, const float *y) {
// APPROX-NEXT: [[ADD_I]] = fadd contract double [[__R_0_I4]], [[MUL_I]]
// APPROX-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 8
// APPROX-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// APPROX-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// APPROX-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP21:![0-9]+]]
+// APPROX: _ZL4normiPKd.exit.loopexit:
+// APPROX-NEXT: [[TMP1:%.*]] = tail call contract double @llvm.sqrt.f64(double [[ADD_I]])
+// APPROX-NEXT: br label [[_ZL4NORMIPKD_EXIT]]
// APPROX: _ZL4normiPKd.exit:
-// APPROX-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// APPROX-NEXT: [[TMP1:%.*]] = tail call contract noundef double @llvm.sqrt.f64(double [[__R_0_I_LCSSA]])
-// APPROX-NEXT: ret double [[TMP1]]
+// APPROX-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL4NORMIPKD_EXIT_LOOPEXIT]] ]
+// APPROX-NEXT: ret double [[__R_0_I_LCSSA]]
//
// NCRDIV-LABEL: @test_norm(
// NCRDIV-NEXT: entry:
@@ -5145,11 +5093,13 @@ extern "C" __device__ float test_normf(int x, const float *y) {
// NCRDIV-NEXT: [[ADD_I]] = fadd contract double [[__R_0_I4]], [[MUL_I]]
// NCRDIV-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 8
// NCRDIV-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP23:![0-9]+]]
+// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP22:![0-9]+]]
+// NCRDIV: _ZL4normiPKd.exit.loopexit:
+// NCRDIV-NEXT: [[TMP1:%.*]] = tail call contract double @llvm.sqrt.f64(double [[ADD_I]])
+// NCRDIV-NEXT: br label [[_ZL4NORMIPKD_EXIT]]
// NCRDIV: _ZL4normiPKd.exit:
-// NCRDIV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// NCRDIV-NEXT: [[TMP1:%.*]] = tail call contract noundef double @llvm.sqrt.f64(double [[__R_0_I_LCSSA]])
-// NCRDIV-NEXT: ret double [[TMP1]]
+// NCRDIV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL4NORMIPKD_EXIT_LOOPEXIT]] ]
+// NCRDIV-NEXT: ret double [[__R_0_I_LCSSA]]
//
// AMDGCNSPIRV-LABEL: @test_norm(
// AMDGCNSPIRV-NEXT: entry:
@@ -5165,11 +5115,13 @@ extern "C" __device__ float test_normf(int x, const float *y) {
// AMDGCNSPIRV-NEXT: [[ADD_I]] = fadd contract double [[__R_0_I4]], [[MUL_I]]
// AMDGCNSPIRV-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[__A_ADDR_0_I3]], i64 8
// AMDGCNSPIRV-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// AMDGCNSPIRV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP22:![0-9]+]]
+// AMDGCNSPIRV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL4NORMIPKD_EXIT_LOOPEXIT:%.*]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP22:![0-9]+]]
+// AMDGCNSPIRV: _ZL4normiPKd.exit.loopexit:
+// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = tail call contract addrspace(4) double @llvm.sqrt.f64(double [[ADD_I]])
+// AMDGCNSPIRV-NEXT: br label [[_ZL4NORMIPKD_EXIT]]
// AMDGCNSPIRV: _ZL4normiPKd.exit:
-// AMDGCNSPIRV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
-// AMDGCNSPIRV-NEXT: [[TMP1:%.*]] = tail call contract noundef addrspace(4) double @llvm.sqrt.f64(double [[__R_0_I_LCSSA]])
-// AMDGCNSPIRV-NEXT: ret double [[TMP1]]
+// AMDGCNSPIRV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP1]], [[_ZL4NORMIPKD_EXIT_LOOPEXIT]] ]
+// AMDGCNSPIRV-NEXT: ret double [[__R_0_I_LCSSA]]
//
extern "C" __device__ double test_norm(int x, const double *y) {
return norm(x, y);
@@ -5707,7 +5659,7 @@ extern "C" __device__ double test_rint(double x) {
// NCRDIV-NEXT: [[ADD_I]] = fadd contract float [[__R_0_I4]], [[MUL_I]]
// NCRDIV-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 4
// NCRDIV-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL6RNORMFIPKF_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP24:![0-9]+]]
+// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL6RNORMFIPKF_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP23:![0-9]+]]
// NCRDIV: _ZL6rnormfiPKf.exit:
// NCRDIV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
// NCRDIV-NEXT: [[CALL_I:%.*]] = tail call contract noundef float @__ocml_rsqrt_f32(float noundef [[__R_0_I_LCSSA]]) #[[ATTR15]]
@@ -5811,7 +5763,7 @@ extern "C" __device__ float test_rnormf(int x, const float* y) {
// NCRDIV-NEXT: [[ADD_I]] = fadd contract double [[__R_0_I4]], [[MUL_I]]
// NCRDIV-NEXT: [[INCDEC_PTR_I]] = getelementptr inbounds nuw i8, ptr [[__A_ADDR_0_I3]], i64 8
// NCRDIV-NEXT: [[TOBOOL_NOT_I:%.*]] = icmp eq i32 [[DEC_I]], 0
-// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5RNORMIPKD_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP25:![0-9]+]]
+// NCRDIV-NEXT: br i1 [[TOBOOL_NOT_I]], label [[_ZL5RNORMIPKD_EXIT]], label [[WHILE_BODY_I]], !llvm.loop [[LOOP24:![0-9]+]]
// NCRDIV: _ZL5rnormiPKd.exit:
// NCRDIV-NEXT: [[__R_0_I_LCSSA:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[ADD_I]], [[WHILE_BODY_I]] ]
// NCRDIV-NEXT: [[CALL_I:%.*]] = tail call contract noundef double @__ocml_rsqrt_f64(double noundef [[__R_0_I_LCSSA]]) #[[ATTR15]]
@@ -6616,7 +6568,7 @@ extern "C" __device__ double test_sinpi(double x) {
//
// NCRDIV-LABEL: @test_sqrtf(
// NCRDIV-NEXT: entry:
-// NCRDIV-NEXT: [[TMP0:%.*]] = tail call contract noundef float @llvm.sqrt.f32(float [[X:%.*]]), !fpmath [[META22]]
+// NCRDIV-NEXT: [[TMP0:%.*]] = tail call contract noundef float @llvm.sqrt.f32(float [[X:%.*]]), !fpmath [[META25:![0-9]+]]
// NCRDIV-NEXT: ret float [[TMP0]]
//
// AMDGCNSPIRV-LABEL: @test_sqrtf(
diff --git a/clang/test/Headers/__cpuidex_conflict.c b/clang/test/Headers/__cpuidex_conflict.c
index 49795c4..74f4532 100644
--- a/clang/test/Headers/__cpuidex_conflict.c
+++ b/clang/test/Headers/__cpuidex_conflict.c
@@ -1,19 +1,17 @@
// Make sure that __cpuidex in cpuid.h doesn't conflict with the MS
// extensions built in by ensuring compilation succeeds:
-// RUN: %clang_cc1 %s -ffreestanding -fms-extensions -fms-compatibility \
-// RUN: -fms-compatibility-version=19.00 -triple x86_64-pc-windows-msvc -emit-llvm -o -
-// %clang_cc1 %s -ffreestanding -triple x86_64-w64-windows-gnu -fms-extensions -emit-llvm -o -
-//
-// FIXME: See https://github.com/llvm/llvm-project/pull/121839 and
-// FIXME: https://github.com/llvm/llvm-project/pull/126324
-// RUN: not %clang_cc1 %s -ffreestanding -fopenmp -fopenmp-is-target-device -aux-triple x86_64-unknown-linux-gnu
+// RUN: %clang_cc1 %s -DIS_STATIC="" -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=19.00 -triple x86_64-pc-windows-msvc -emit-llvm -o -
+// RUN: %clang_cc1 %s -DIS_STATIC="" -ffreestanding -triple x86_64-w64-windows-gnu -fms-extensions -emit-llvm -o -
+
+// Ensure that we do not run into conflicts when offloading.
+// RUN: %clang_cc1 %s -DIS_STATIC=static -ffreestanding -fopenmp -fopenmp-is-target-device -aux-triple x86_64-unknown-linux-gnu
typedef __SIZE_TYPE__ size_t;
// We declare __cpuidex here because, where the builtin should be exposed
// (MSVC), the declaration is in <intrin.h>, but <intrin.h> is not available
// from all the targets that are being tested here.
-void __cpuidex (int[4], int, int);
+IS_STATIC void __cpuidex (int[4], int, int);
#include <cpuid.h>
@@ -22,4 +20,3 @@ int cpuid_info[4];
void test_cpuidex(unsigned level, unsigned count) {
__cpuidex(cpuid_info, level, count);
}
-
diff --git a/clang/test/Misc/target-invalid-cpu-note/nvptx.c b/clang/test/Misc/target-invalid-cpu-note/nvptx.c
index b5209ff..b90f26e 100644
--- a/clang/test/Misc/target-invalid-cpu-note/nvptx.c
+++ b/clang/test/Misc/target-invalid-cpu-note/nvptx.c
@@ -30,8 +30,12 @@
// CHECK-SAME: {{^}}, sm_100a
// CHECK-SAME: {{^}}, sm_101
// CHECK-SAME: {{^}}, sm_101a
+// CHECK-SAME: {{^}}, sm_103
+// CHECK-SAME: {{^}}, sm_103a
// CHECK-SAME: {{^}}, sm_120
// CHECK-SAME: {{^}}, sm_120a
+// CHECK-SAME: {{^}}, sm_121
+// CHECK-SAME: {{^}}, sm_121a
// CHECK-SAME: {{^}}, gfx600
// CHECK-SAME: {{^}}, gfx601
// CHECK-SAME: {{^}}, gfx602
diff --git a/clang/test/PCH/debug-info-pch-container-path.c b/clang/test/PCH/debug-info-pch-container-path.c
index 257cbf5..19b1a28 100644
--- a/clang/test/PCH/debug-info-pch-container-path.c
+++ b/clang/test/PCH/debug-info-pch-container-path.c
@@ -9,6 +9,7 @@
// RUN: -triple %itanium_abi_triple \
// RUN: -fdebug-prefix-map=%t=BUILD \
// RUN: -fdebug-prefix-map=%S=SOURCE \
+// RUN: -fdebug-compilation-dir=%t \
// RUN: -o %t/prefix.ll %S/debug-info-limited-struct.h \
// RUN: -mllvm -debug-only=pchcontainer &>%t-container.ll
// RUN: cat %t-container.ll | FileCheck %s
diff --git a/clang/test/PCH/debug-info-pch-path.c b/clang/test/PCH/debug-info-pch-path.c
index f94d2fa..22b367f 100644
--- a/clang/test/PCH/debug-info-pch-path.c
+++ b/clang/test/PCH/debug-info-pch-path.c
@@ -65,7 +65,8 @@
// RUN: %clang_cc1 -debug-info-kind=standalone \
// RUN: -dwarf-ext-refs -fmodule-format=obj \
// RUN: -triple %itanium_abi_triple \
-// RUN: -include-pch %t/prefix.pch %s -emit-llvm -o %t.abs.ll %s
+// RUN: -include-pch %t/prefix.pch %s -emit-llvm \
+// RUN: -fdebug-compilation-dir=%t -o %t.abs.ll %s
// RUN: cat %t.abs.ll | FileCheck %s --check-prefix=CHECK-ABS
// CHECK-ABS: !DICompileUnit
diff --git a/clang/test/Preprocessor/wasm-target-features.c b/clang/test/Preprocessor/wasm-target-features.c
index 71b7cf6..3edaf9e 100644
--- a/clang/test/Preprocessor/wasm-target-features.c
+++ b/clang/test/Preprocessor/wasm-target-features.c
@@ -53,6 +53,15 @@
// FP16: #define __wasm_fp16__ 1{{$}}
// RUN: %clang -E -dM %s -o - 2>&1 \
+// RUN: -target wasm32-unknown-unknown -mgc \
+// RUN: | FileCheck %s -check-prefix=GC
+// RUN: %clang -E -dM %s -o - 2>&1 \
+// RUN: -target wasm64-unknown-unknown -mgc \
+// RUN: | FileCheck %s -check-prefix=GC
+//
+// GC: #define __wasm_gc__ 1{{$}}
+
+// RUN: %clang -E -dM %s -o - 2>&1 \
// RUN: -target wasm32-unknown-unknown -mmultimemory \
// RUN: | FileCheck %s -check-prefix=MULTIMEMORY
// RUN: %clang -E -dM %s -o - 2>&1 \
@@ -145,6 +154,7 @@
// MVP-NOT: #define __wasm_exception_handling__ 1{{$}}
// MVP-NOT: #define __wasm_extended_const__ 1{{$}}
// MVP-NOT: #define __wasm_fp16__ 1{{$}}
+// MVP-NOT: #define __wasm_gc__ 1{{$}}
// MVP-NOT: #define __wasm_multimemory__ 1{{$}}
// MVP-NOT: #define __wasm_multivalue__ 1{{$}}
// MVP-NOT: #define __wasm_mutable_globals__ 1{{$}}
@@ -181,6 +191,7 @@
// GENERIC-NOT: #define __wasm_exception_handling__ 1{{$}}
// GENERIC-NOT: #define __wasm_extended_const__ 1{{$}}
// GENERIC-NOT: #define __wasm__fp16__ 1{{$}}
+// GENERIC-NOT: #define __wasm_gc__ 1{{$}}
// GENERIC-NOT: #define __wasm_multimemory__ 1{{$}}
// GENERIC-NOT: #define __wasm_relaxed_simd__ 1{{$}}
// GENERIC-NOT: #define __wasm_simd128__ 1{{$}}
@@ -199,6 +210,7 @@
// BLEEDING-EDGE-INCLUDE-DAG: #define __wasm_exception_handling__ 1{{$}}
// BLEEDING-EDGE-INCLUDE-DAG: #define __wasm_extended_const__ 1{{$}}
// BLEEDING-EDGE-INCLUDE-DAG: #define __wasm_fp16__ 1{{$}}
+// BLEEDING-EDGE-INCLUDE-DAG: #define __wasm_gc__ 1{{$}}
// BLEEDING-EDGE-INCLUDE-DAG: #define __wasm_multimemory__ 1{{$}}
// BLEEDING-EDGE-INCLUDE-DAG: #define __wasm_multivalue__ 1{{$}}
// BLEEDING-EDGE-INCLUDE-DAG: #define __wasm_mutable_globals__ 1{{$}}
diff --git a/clang/test/Profile/coverage-prefix-map.c b/clang/test/Profile/coverage-prefix-map.c
index de9e377..1cb095f 100644
--- a/clang/test/Profile/coverage-prefix-map.c
+++ b/clang/test/Profile/coverage-prefix-map.c
@@ -25,7 +25,7 @@
// COVERAGE-PREFIX-MAP-ORDER: @__llvm_coverage_mapping = {{.*"\\02.*newpath.*root.*nested.*coverage-prefix-map\.c}}
// Test that last -fcoverage-prefix-map option (-fcoverage-prefix-map=%/t/root=.) is applied.
-// RUN: %clang_cc1 -fprofile-instrument=clang -fcoverage-mapping -emit-llvm -mllvm -enable-name-compression=false -main-file-name coverage-prefix-map.c %t/root/nested/coverage-prefix-map.c -fcoverage-prefix-map==newpath -fcoverage-prefix-map=%/t/root=. -o - | FileCheck --check-prefix=COVERAGE-PREFIX-MAP-REORDER %s
+// RUN: %clang_cc1 -fprofile-instrument=clang -fcoverage-mapping -emit-llvm -mllvm -enable-name-compression=false -main-file-name coverage-prefix-map.c %t/root/nested/coverage-prefix-map.c -fcoverage-compilation-dir=%t/root -fcoverage-prefix-map==newpath -fcoverage-prefix-map=%/t/root=. -o - | FileCheck --check-prefix=COVERAGE-PREFIX-MAP-REORDER %s
// COVERAGE-PREFIX-MAP-REORDER: @__llvm_coverage_mapping =
// COVERAGE-PREFIX-MAP-REORDER-NOT: newpath
// COVERAGE-PREFIX-MAP-REORDER-SAME: nested{{.*coverage-prefix-map\.c}}
diff --git a/clang/test/Sema/constexpr-void-cast.c b/clang/test/Sema/constexpr-void-cast.c
index 2ffc59f..cac671e 100644
--- a/clang/test/Sema/constexpr-void-cast.c
+++ b/clang/test/Sema/constexpr-void-cast.c
@@ -4,15 +4,16 @@
// RUN: %clang_cc1 -x c -fsyntax-only %s -pedantic -verify=c-pedantic -std=c11 -fexperimental-new-constant-interpreter
//
// RUN: %clang_cc1 -x c++ -fsyntax-only %s -verify=cxx
-// RUN: %clang_cc1 -x c++ -fsyntax-only %s -pedantic -verify=cxx-pedantic
+// RUN: %clang_cc1 -x c++ -fsyntax-only %s -pedantic -verify=cxx,cxx-pedantic
// RUN: %clang_cc1 -x c++ -fsyntax-only %s -verify=cxx -fexperimental-new-constant-interpreter
-// RUN: %clang_cc1 -x c++ -fsyntax-only %s -pedantic -verify=cxx-pedantic -fexperimental-new-constant-interpreter
+// RUN: %clang_cc1 -x c++ -fsyntax-only %s -pedantic -verify=cxx,cxx-pedantic -fexperimental-new-constant-interpreter
// c-no-diagnostics
-// cxx-no-diagnostics
void f(void);
struct S {char c;} s;
_Static_assert(&s != (void *)&f, ""); // c-pedantic-warning {{not an integer constant expression}} \
// c-pedantic-note {{this conversion is not allowed in a constant expression}} \
+ // cxx-error {{static assertion expression is not an integral constant expression}} \
+ // cxx-note {{cast that performs the conversions of a reinterpret_cast is not allowed in a constant expression}} \
// cxx-pedantic-warning {{'_Static_assert' is a C11 extension}}
diff --git a/clang/test/Sema/warn-unreachable_crash.cpp b/clang/test/Sema/warn-unreachable_crash.cpp
index 628abcc..1955c2c 100644
--- a/clang/test/Sema/warn-unreachable_crash.cpp
+++ b/clang/test/Sema/warn-unreachable_crash.cpp
@@ -1,16 +1,33 @@
-// RUN: %clang_cc1 -verify -Wunreachable-code %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -verify -Wunreachable-code %s
+// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -target-feature +fullfp16 -verify -Wunreachable-code %s
+// REQUIRES: aarch64-registered-target
-// Previously this test will crash
-static void test(__fp16& x) {
- if (x != 0 || x != 1.0) { // expected-note{{}} no-crash
- x = 0.9;
- } else
- x = 0.8; // expected-warning{{code will never be executed}}
+// ======= __fp16 version =======
+static void test_fp16(__fp16 &x) {
+ if (x != 0 || x != 1.0) { // expected-note {{}} no-crash
+ x = 0.9;
+ } else
+ x = 0.8; // expected-warning{{code will never be executed}}
}
-static void test2(__fp16& x) {
- if (x != 1 && x == 1.0) { // expected-note{{}} no-crash
- x = 0.9; // expected-warning{{code will never be executed}}
- } else
- x = 0.8;
+static void test_fp16_b(__fp16 &x) {
+ if (x != 1 && x == 1.0) { // expected-note {{}} no-crash
+ x = 0.9; // expected-warning{{code will never be executed}}
+ } else
+ x = 0.8;
+}
+
+// ======= _Float16 version =======
+static void test_f16(_Float16 &x) {
+ if (x != 0 || x != 1.0) { // expected-note {{}} no-crash
+ x = 0.9;
+ } else
+ x = 0.8; // expected-warning{{code will never be executed}}
+}
+
+static void test_f16_b(_Float16 &x) {
+ if (x != 1 && x == 1.0) { // expected-note {{}} no-crash
+ x = 0.9; // expected-warning{{code will never be executed}}
+ } else
+ x = 0.8;
}
diff --git a/clang/test/SemaHLSL/BuiltIns/D3DCOLORtoUBYTE4-errors.hlsl b/clang/test/SemaHLSL/BuiltIns/D3DCOLORtoUBYTE4-errors.hlsl
index e9ba851..e9bf4c9 100644
--- a/clang/test/SemaHLSL/BuiltIns/D3DCOLORtoUBYTE4-errors.hlsl
+++ b/clang/test/SemaHLSL/BuiltIns/D3DCOLORtoUBYTE4-errors.hlsl
@@ -25,5 +25,5 @@ struct S {
int4 struct_arg(S v) {
return D3DCOLORtoUBYTE4(v);
// expected-error@-1 {{no matching function for call to 'D3DCOLORtoUBYTE4'}}
- // expected-note@hlsl/hlsl_intrinsics.h:* {{candidate function not viable: no known conversion from 'S' to 'vector<float, 4>' (vector of 4 'float' values) for 1st argument}}
+ // expected-note@hlsl/hlsl_intrinsics.h:* {{candidate function not viable: no known conversion from 'S' to 'float4' (aka 'vector<float, 4>') for 1st argument}}
}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index 3247380..83c63f1 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -1,7 +1,21 @@
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -cl-std=CL2.0 -triple amdgcn-- -target-cpu gfx1250 -verify -S -o - %s
+typedef unsigned int uint;
+typedef unsigned short int ushort;
typedef int v2i __attribute__((ext_vector_type(2)));
+typedef unsigned int __attribute__((ext_vector_type(2))) uint2;
+typedef unsigned int __attribute__((ext_vector_type(3))) uint3;
+typedef __bf16 __attribute__((ext_vector_type(8))) bfloat8;
+typedef __bf16 __attribute__((ext_vector_type(16))) bfloat16;
+typedef __bf16 __attribute__((ext_vector_type(32))) bfloat32;
+typedef half __attribute__((ext_vector_type(8))) half8;
+typedef half __attribute__((ext_vector_type(16))) half16;
+typedef half __attribute__((ext_vector_type(32))) half32;
+typedef float __attribute__((ext_vector_type(8))) float8;
+typedef float __attribute__((ext_vector_type(16))) float16;
+typedef float __attribute__((ext_vector_type(32))) float32;
+
typedef int v4i __attribute__((ext_vector_type(4)));
typedef int v8i __attribute__((ext_vector_type(8)));
@@ -29,6 +43,32 @@ void test__builtin_amdgcn_cvt_f16_bf8(int a, int b) {
__builtin_amdgcn_cvt_f16_bf8(a, b); // expected-error {{'__builtin_amdgcn_cvt_f16_bf8' must be a constant integer}}
}
+void test_cvt_scale_pk(global half8 *outh8, global bfloat8 *outy8, uint2 src2,
+ global float32 *outf32, global half16 *outh16, global bfloat16 *outy16,
+ global float16 *outf16, uint3 src3,
+ global float8 *outf8, uint src1, uint scale, uint scale_sel)
+{
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_fp8(src2, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_f16_fp8' must be a constant integer}}
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_fp8(src2, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_bf16_fp8' must be a constant integer}}
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_bf8(src2, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_f16_bf8' must be a constant integer}}
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_bf8(src2, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_bf16_bf8' must be a constant integer}}
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_fp4(src1, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_f16_fp4' must be a constant integer}}
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_fp4(src1, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_bf16_fp4' must be a constant integer}}
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_fp8(src2, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_f32_fp8' must be a constant integer}}
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_bf8(src2, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_f32_bf8' must be a constant integer}}
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_fp4(src1, scale, scale_sel); // expected-error {{'__builtin_amdgcn_cvt_scale_pk8_f32_fp4' must be a constant integer}}
+
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_fp8(src2, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_fp8(src2, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_bf8(src2, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_bf8(src2, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outh8 = __builtin_amdgcn_cvt_scale_pk8_f16_fp4(src1, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outy8 = __builtin_amdgcn_cvt_scale_pk8_bf16_fp4(src1, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_fp8(src2, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_bf8(src2, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+ *outf8 = __builtin_amdgcn_cvt_scale_pk8_f32_fp4(src1, scale, 8); // expected-error {{argument value 8 is outside the valid range [0, 7]}}
+}
+
void test_amdgcn_load_monitor(global int* b32gaddr, global v2i* b64gaddr, global v4i* b128gaddr, int *b32faddr, v2i* b64faddr, v4i *b128faddr,
global int* b32out, global v2i* b64out, global v4i* b128out, int cpol)
{
diff --git a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp
index 872a44d..4774c8e 100644
--- a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp
+++ b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-appertainment.cpp
@@ -43,10 +43,11 @@ template<int, int = 0> struct KN;
////////////////////////////////////////////////////////////////////////////////
// Function declaration with GNU attribute spelling
+// expected-warning@+1 {{unknown attribute 'sycl_kernel_entry_point' ignored}}
__attribute__((sycl_kernel_entry_point(KN<1>)))
void ok1();
-// Function declaration with Clang attribute spelling.
+// Function declaration with C++11 attribute spelling.
[[clang::sycl_kernel_entry_point(KN<2>)]]
void ok2();
@@ -142,7 +143,7 @@ struct S15 {
// on occasion), main() still can't function as a SYCL kernel entry point,
// so this test ensures such attempted uses of the attribute are rejected.
struct Smain;
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute only applies to functions with a 'void' return type}}
+// expected-error@+1 {{'clang::sycl_kernel_entry_point' attribute only applies to functions with a 'void' return type}}
[[clang::sycl_kernel_entry_point(Smain)]]
int main();
@@ -164,7 +165,7 @@ struct B2 {
struct B3 {
// Non-static member function declaration.
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
[[clang::sycl_kernel_entry_point(BADKN<3>)]]
void bad3();
};
@@ -210,14 +211,14 @@ enum {
};
// Attribute added after the definition.
-// expected-error@+3 {{'sycl_kernel_entry_point' attribute cannot be added to a function after the function is defined}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' attribute cannot be added to a function after the function is defined}}
// expected-note@+1 {{previous definition is here}}
void bad15() {}
[[clang::sycl_kernel_entry_point(BADKN<15>)]]
void bad15();
// The function must return void.
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute only applies to functions with a 'void' return type}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute only applies to functions with a 'void' return type}}
[[clang::sycl_kernel_entry_point(BADKN<16>)]]
int bad16();
@@ -230,12 +231,12 @@ void bad17(void (fp [[clang::sycl_kernel_entry_point(BADKN<17>)]])());
// FIXME: and the C++ standard is unclear regarding whether such attributes are
// FIXME: permitted. P3324 (Attributes for namespace aliases, template
// FIXME: parameters, and lambda captures) seeks to clarify the situation.
-// FIXME-expected-error@+1 {{'sycl_kernel_entry_point' attribute only applies to functions}}
+// FIXME-expected-error@+1 {{'clang::sycl_kernel_entry_point' attribute only applies to functions}}
template<void (fp [[clang::sycl_kernel_entry_point(BADKN<18>)]])()>
void bad18();
#if __cplusplus >= 202002L
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a coroutine}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a coroutine}}
[[clang::sycl_kernel_entry_point(BADKN<19>)]]
void bad19() {
co_return;
@@ -243,36 +244,36 @@ void bad19() {
#endif
struct B20 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
[[clang::sycl_kernel_entry_point(BADKN<20>)]]
B20();
};
struct B21 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
[[clang::sycl_kernel_entry_point(BADKN<21>)]]
~B21();
};
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a variadic function}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a variadic function}}
[[clang::sycl_kernel_entry_point(BADKN<22>)]]
void bad22(...);
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a deleted function}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a deleted function}}
[[clang::sycl_kernel_entry_point(BADKN<23>)]]
void bad23() = delete;
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a constexpr function}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a constexpr function}}
[[clang::sycl_kernel_entry_point(BADKN<24>)]]
constexpr void bad24() {}
#if __cplusplus >= 202002L
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a consteval function}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a consteval function}}
[[clang::sycl_kernel_entry_point(BADKN<25>)]]
consteval void bad25() {}
#endif
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a function declared with the 'noreturn' attribute}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a function declared with the 'noreturn' attribute}}
[[clang::sycl_kernel_entry_point(BADKN<26>)]]
[[noreturn]] void bad26();
@@ -283,7 +284,7 @@ __attribute__((target("sse4.2"))) void bad27();
template<typename KNT>
struct B28 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a deleted function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a deleted function}}
[[clang::sycl_kernel_entry_point(KNT)]]
friend void bad28() = delete;
};
@@ -291,7 +292,7 @@ struct B28 {
#if __cplusplus >= 202002L
template<typename KNT, typename T>
struct B29 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a defaulted function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a defaulted function}}
[[clang::sycl_kernel_entry_point(KNT)]]
friend T operator==(B29, B29) = default;
};
@@ -300,7 +301,7 @@ struct B29 {
#if __cplusplus >= 202002L
template<typename KNT>
struct B30 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a coroutine}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a coroutine}}
[[clang::sycl_kernel_entry_point(KNT)]]
friend void bad30() { co_return; }
};
@@ -308,14 +309,14 @@ struct B30 {
template<typename KNT>
struct B31 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a variadic function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a variadic function}}
[[clang::sycl_kernel_entry_point(KNT)]]
friend void bad31(...) {}
};
template<typename KNT>
struct B32 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a constexpr function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a constexpr function}}
[[clang::sycl_kernel_entry_point(KNT)]]
friend constexpr void bad32() {}
};
@@ -323,7 +324,7 @@ struct B32 {
#if __cplusplus >= 202002L
template<typename KNT>
struct B33 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a consteval function}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a consteval function}}
[[clang::sycl_kernel_entry_point(KNT)]]
friend consteval void bad33() {}
};
@@ -331,31 +332,31 @@ struct B33 {
template<typename KNT>
struct B34 {
- // expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a function declared with the 'noreturn' attribute}}
+ // expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a function declared with the 'noreturn' attribute}}
[[clang::sycl_kernel_entry_point(KNT)]]
[[noreturn]] friend void bad34() {}
};
#if __cplusplus >= 202302L
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a non-static member function}}
auto bad35 = [] [[clang::sycl_kernel_entry_point(BADKN<35>)]] -> void {};
#endif
#if __cplusplus >= 202302L
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute only applies to functions with a non-deduced 'void' return type}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute only applies to functions with a non-deduced 'void' return type}}
auto bad36 = [] [[clang::sycl_kernel_entry_point(BADKN<36>)]] static {};
#endif
#if __cplusplus >= 202302L
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a coroutine}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a coroutine}}
auto bad37 = [] [[clang::sycl_kernel_entry_point(BADKN<37>)]] static -> void { co_return; };
#endif
-// expected-error@+1 {{'sycl_kernel_entry_point' attribute cannot be applied to a function defined with a function try block}}
+// expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a function defined with a function try block}}
[[clang::sycl_kernel_entry_point(BADKN<38>)]]
void bad38() try {} catch(...) {}
-// expected-error@+2 {{'sycl_kernel_entry_point' attribute cannot be applied to a function defined with a function try block}}
+// expected-error@+2 {{the 'clang::sycl_kernel_entry_point' attribute cannot be applied to a function defined with a function try block}}
template<typename>
[[clang::sycl_kernel_entry_point(BADKN<39>)]]
void bad39() try {} catch(...) {}
diff --git a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-module.cpp b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-module.cpp
index 83c3e5c..8788e14 100644
--- a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-module.cpp
+++ b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-module.cpp
@@ -71,29 +71,29 @@ template void m2_test8<KN<8>>();
#include "m2.h"
// Expected diagnostics for m1_test3() and m2_test3():
-// expected-error@m2.h:4 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@m2.h:4 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@m1.h:12 {{previous declaration is here}}
// Expected diagnostics for m1_test4<KN<4>>() and m2_test4<KN<4>>():
-// expected-error@m2.h:8 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@m2.h:8 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@m1.h:16 {{previous declaration is here}}
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@m1.h:4 {{previous declaration is here}}
[[clang::sycl_kernel_entry_point(KN<5>)]]
void test5() {}
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@m1.h:8 {{previous declaration is here}}
[[clang::sycl_kernel_entry_point(KN<6>)]]
void test6() {}
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@m2.h:12 {{previous declaration is here}}
[[clang::sycl_kernel_entry_point(KN<7>)]]
void test7() {}
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@m2.h:16 {{previous declaration is here}}
[[clang::sycl_kernel_entry_point(KN<8>)]]
void test8() {}
diff --git a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-pch.cpp b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-pch.cpp
index 0814d89..0575a7a 100644
--- a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-pch.cpp
+++ b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name-pch.cpp
@@ -25,12 +25,12 @@ template void pch_test2<KN<2>>();
#--- test.cpp
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@pch.h:4 {{previous declaration is here}}
[[clang::sycl_kernel_entry_point(KN<1>)]]
void test1() {}
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@pch.h:8 {{previous declaration is here}}
[[clang::sycl_kernel_entry_point(KN<2>)]]
void test2() {}
diff --git a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name.cpp b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name.cpp
index 78dd896..c7b8393 100644
--- a/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name.cpp
+++ b/clang/test/SemaSYCL/sycl-kernel-entry-point-attr-kernel-name.cpp
@@ -7,7 +7,7 @@
// specification.
struct S1;
-// expected-warning@+3 {{redundant 'sycl_kernel_entry_point' attribute}}
+// expected-warning@+3 {{redundant 'clang::sycl_kernel_entry_point' attribute}}
// expected-note@+1 {{previous attribute is here}}
[[clang::sycl_kernel_entry_point(S1),
clang::sycl_kernel_entry_point(S1)]]
@@ -46,13 +46,13 @@ enum E9 : int; // #E9-decl
struct B10 {
struct MS;
};
-// FIXME-expected-error@+1 {{'sycl_kernel_entry_point' attribute argument must be a forward declarable class type}}
+// FIXME-expected-error@+1 {{the 'clang::sycl_kernel_entry_point' attribute argument must be a forward declarable class type}}
[[clang::sycl_kernel_entry_point(B10::MS)]] void bad10();
struct B11 {
struct MS;
};
-// FIXME-expected-error@+3 {{'sycl_kernel_entry_point' attribute argument must be a forward declarable class type}}
+// FIXME-expected-error@+3 {{the 'clang::sycl_kernel_entry_point' attribute argument must be a forward declarable class type}}
template<typename T>
[[clang::sycl_kernel_entry_point(typename T::MS)]] void bad11() {}
template void bad11<B11>();
@@ -60,35 +60,35 @@ template void bad11<B11>();
template<typename T>
[[clang::sycl_kernel_entry_point(T)]] void bad12();
void f12() {
- // FIXME-expected-error@+2 {{'sycl_kernel_entry_point' attribute argument must be a forward declarable class type}}
+ // FIXME-expected-error@+2 {{the 'clang::sycl_kernel_entry_point' attribute argument must be a forward declarable class type}}
struct LS;
bad12<LS>();
}
struct B13_1;
struct B13_2;
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument does not match prior declaration: 'B13_2' vs 'B13_1'}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument does not match prior declaration: 'B13_2' vs 'B13_1'}}
// expected-note@+1 {{'bad13' declared here}}
[[clang::sycl_kernel_entry_point(B13_1)]] void bad13();
[[clang::sycl_kernel_entry_point(B13_2)]] void bad13() {}
struct B14_1;
struct B14_2;
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument does not match prior declaration: 'B14_2' vs 'B14_1'}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument does not match prior declaration: 'B14_2' vs 'B14_1'}}
// expected-note@+1 {{previous attribute is here}}
[[clang::sycl_kernel_entry_point(B14_1),
clang::sycl_kernel_entry_point(B14_2)]]
void bad14();
struct B15;
-// expected-error@+3 {{'sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
+// expected-error@+3 {{the 'clang::sycl_kernel_entry_point' kernel name argument conflicts with a previous declaration}}
// expected-note@+1 {{previous declaration is here}}
[[clang::sycl_kernel_entry_point(B15)]] void bad15_1();
[[clang::sycl_kernel_entry_point(B15)]] void bad15_2();
struct B16_1;
struct B16_2;
-// expected-error@+4 {{'sycl_kernel_entry_point' kernel name argument does not match prior declaration: 'B16_2' vs 'B16_1'}}
+// expected-error@+4 {{the 'clang::sycl_kernel_entry_point' kernel name argument does not match prior declaration: 'B16_2' vs 'B16_1'}}
// expected-note@+1 {{'bad16' declared here}}
[[clang::sycl_kernel_entry_point(B16_1)]] void bad16();
void bad16(); // The attribute from the previous declaration is inherited.
diff --git a/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp b/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp
index 0b621b8..ddb2944 100644
--- a/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp
+++ b/clang/tools/clang-extdef-mapping/ClangExtDefMapGen.cpp
@@ -134,10 +134,9 @@ GetDiagnosticsEngine(DiagnosticOptions &DiagOpts) {
TextDiagnosticPrinter *DiagClient =
new TextDiagnosticPrinter(llvm::errs(), DiagOpts);
DiagClient->setPrefix("clang-extdef-mappping");
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticsEngine> DiagEngine(
- new DiagnosticsEngine(DiagID, DiagOpts, DiagClient));
+ auto DiagEngine = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(
+ DiagnosticIDs::create(), DiagOpts, DiagClient);
Diags.swap(DiagEngine);
// Retain this one time so it's not destroyed by ASTUnit::LoadFromASTFile
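
The recurring change in this commit replaces two-step construction of ref-counted diagnostics state, where a raw `new DiagnosticIDs()` is adopted into an `IntrusiveRefCntPtr`, with the `DiagnosticIDs::create()` factory and `llvm::makeIntrusiveRefCnt<>()`. A minimal sketch of the before/after idiom, distilled from the hunks above (the `DiagnosticOptions` instance is assumed to be set up by the caller):

    #include "clang/Basic/Diagnostic.h"
    #include "clang/Basic/DiagnosticIDs.h"
    #include "clang/Basic/DiagnosticOptions.h"

    using namespace clang;

    void buildEngine(DiagnosticOptions &DiagOpts) {
      // Old idiom (pre-commit): adopt a raw allocation into the smart
      // pointer, then hand the extra local to the engine:
      //   IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
      //   DiagnosticsEngine Diags(DiagID, DiagOpts);

      // New idiom: the factory already returns an IntrusiveRefCntPtr, so the
      // intermediate variable disappears and the call site is one line.
      DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts);
      (void)Diags;
    }
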
diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp
index 24ad3cb..5f6502f 100644
--- a/clang/tools/clang-format/ClangFormat.cpp
+++ b/clang/tools/clang-format/ClangFormat.cpp
@@ -237,12 +237,11 @@ static bool parseLineRange(StringRef Input, unsigned &FromLine,
static bool fillRanges(MemoryBuffer *Code,
std::vector<tooling::Range> &Ranges) {
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs), DiagOpts);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts);
SourceManager Sources(Diagnostics, Files);
const auto ID = createInMemoryFile("<irrelevant>", *Code, Sources, Files,
InMemoryFileSystem.get());
@@ -511,15 +510,14 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
if (OutputXML) {
outputXML(Replaces, FormatChanges, Status, Cursor, CursorPosition);
} else {
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticOptions DiagOpts;
ClangFormatDiagConsumer IgnoreDiagnostics;
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs), DiagOpts,
- &IgnoreDiagnostics, false);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts,
+ &IgnoreDiagnostics, false);
SourceManager Sources(Diagnostics, Files);
FileID ID = createInMemoryFile(AssumedFileName, *Code, Sources, Files,
InMemoryFileSystem.get());
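
The same cleanup applies to the virtual-filesystem locals in the hunks above: `llvm::makeIntrusiveRefCnt<T>()` allocates and wraps in one expression, analogous to `std::make_shared`, and `auto` removes the repeated type name. A self-contained sketch (file name and contents are placeholders):

    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/VirtualFileSystem.h"

    int main() {
      // Old spelling: the type is written twice and a raw `new` is adopted.
      llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> OldFS(
          new llvm::vfs::InMemoryFileSystem);

      // New spelling: one expression, no raw `new`, type stated once.
      auto NewFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
      NewFS->addFile("irrelevant.cpp", /*ModificationTime=*/0,
                     llvm::MemoryBuffer::getMemBuffer("int x;\n"));
    }
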
diff --git a/clang/tools/clang-fuzzer/handle-cxx/handle_cxx.cpp b/clang/tools/clang-fuzzer/handle-cxx/handle_cxx.cpp
index 98925c0..8259361 100644
--- a/clang/tools/clang-fuzzer/handle-cxx/handle_cxx.cpp
+++ b/clang/tools/clang-fuzzer/handle-cxx/handle_cxx.cpp
@@ -33,9 +33,8 @@ void clang_fuzzer::HandleCXX(const std::string &S,
new FileManager(FileSystemOptions()));
IgnoringDiagConsumer Diags;
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<clang::DiagnosticIDs>(new DiagnosticIDs()), DiagOpts,
- &Diags, false);
+ DiagnosticsEngine Diagnostics(DiagnosticIDs::create(), DiagOpts, &Diags,
+ false);
std::unique_ptr<clang::CompilerInvocation> Invocation(
tooling::newInvocation(&Diagnostics, CC1Args, /*BinaryName=*/nullptr));
std::unique_ptr<llvm::MemoryBuffer> Input =
@@ -49,4 +48,3 @@ void clang_fuzzer::HandleCXX(const std::string &S,
action->runInvocation(std::move(Invocation), Files.get(), PCHContainerOps,
&Diags);
}
-
diff --git a/clang/tools/clang-import-test/clang-import-test.cpp b/clang/tools/clang-import-test/clang-import-test.cpp
index 7f5df92..ab021a5 100644
--- a/clang/tools/clang-import-test/clang-import-test.cpp
+++ b/clang/tools/clang-import-test/clang-import-test.cpp
@@ -236,7 +236,7 @@ std::unique_ptr<CodeGenerator> BuildCodeGen(CompilerInstance &CI,
llvm::LLVMContext &LLVMCtx) {
StringRef ModuleName("$__module");
return std::unique_ptr<CodeGenerator>(CreateLLVMCodeGen(
- CI.getDiagnostics(), ModuleName, &CI.getVirtualFileSystem(),
+ CI.getDiagnostics(), ModuleName, CI.getVirtualFileSystemPtr(),
CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
LLVMCtx));
}
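
Here the API itself changes rather than just the spelling: `CreateLLVMCodeGen` now takes the virtual filesystem as an `IntrusiveRefCntPtr` obtained from `getVirtualFileSystemPtr()` instead of a raw pointer, so the code generator co-owns the VFS rather than borrowing it. A hedged sketch of the ownership pattern, with a hypothetical consumer class standing in for the code generator:

    #include <utility>
    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    #include "llvm/Support/VirtualFileSystem.h"

    // Hypothetical stand-in for CreateLLVMCodeGen's new parameter convention:
    // storing the ref-counted handle extends the filesystem's lifetime to
    // match the consumer's, which a raw pointer could not guarantee.
    class VFSConsumer {
      llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;

    public:
      explicit VFSConsumer(llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
          : FS(std::move(FS)) {}
    };

    int main() {
      auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
      VFSConsumer CG(FS); // caller and consumer now both own the VFS
    }
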
diff --git a/clang/tools/clang-installapi/ClangInstallAPI.cpp b/clang/tools/clang-installapi/ClangInstallAPI.cpp
index 60e9fc4..70091fc 100644
--- a/clang/tools/clang-installapi/ClangInstallAPI.cpp
+++ b/clang/tools/clang-installapi/ClangInstallAPI.cpp
@@ -77,16 +77,17 @@ static bool run(ArrayRef<const char *> Args, const char *ProgName) {
ArrayRef(Args).slice(1), MissingArgIndex, MissingArgCount);
ParseDiagnosticArgs(DiagOpts, ParsedArgs);
- IntrusiveRefCntPtr<DiagnosticsEngine> Diag = new clang::DiagnosticsEngine(
- new clang::DiagnosticIDs(), DiagOpts,
+ auto Diag = llvm::makeIntrusiveRefCnt<clang::DiagnosticsEngine>(
+ clang::DiagnosticIDs::create(), DiagOpts,
new clang::TextDiagnosticPrinter(llvm::errs(), DiagOpts));
// Create file manager for all file operations and holding in-memory generated
// inputs.
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
IntrusiveRefCntPtr<clang::FileManager> FM(
new FileManager(clang::FileSystemOptions(), OverlayFileSystem));
diff --git a/clang/tools/clang-repl/CMakeLists.txt b/clang/tools/clang-repl/CMakeLists.txt
index 68d86dd..c3d14ce 100644
--- a/clang/tools/clang-repl/CMakeLists.txt
+++ b/clang/tools/clang-repl/CMakeLists.txt
@@ -19,14 +19,14 @@ if(MSVC)
set_target_properties(clang-repl PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS 1)
# RTTI/C++ symbols
- set(clang_repl_exports ${clang_repl_exports} ??_7type_info@@6B@
- ?__type_info_root_node@@3U__type_info_node@@A
- ?nothrow@std@@3Unothrow_t@1@B
+ set(clang_repl_exports ${clang_repl_exports} ??_7type_info@@6B@,DATA
+ ?__type_info_root_node@@3U__type_info_node@@A,DATA
+ ?nothrow@std@@3Unothrow_t@1@B,DATA
)
# Compiler added symbols for static variables. NOT for VStudio < 2015
- set(clang_repl_exports ${clang_repl_exports} _Init_thread_abort _Init_thread_epoch
- _Init_thread_footer _Init_thread_header _tls_index
+ set(clang_repl_exports ${clang_repl_exports} _Init_thread_abort _Init_thread_epoch,DATA
+ _Init_thread_footer _Init_thread_header _tls_index,DATA
)
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
@@ -50,7 +50,10 @@ if(MSVC)
endif()
# List to '/EXPORT:sym0 /EXPORT:sym1 /EXPORT:sym2 ...'
- list(TRANSFORM clang_repl_exports PREPEND "LINKER:/EXPORT:")
+ # The 'SHELL' prefix tells CMake to use a space instead of comma as the
+ # separator between the driver and linker options, which we need since MSVC's
+ # linker uses `,DATA` as a suffix to indicate that data is being exported.
+ list(TRANSFORM clang_repl_exports PREPEND "LINKER:SHELL:/EXPORT:")
set_property(TARGET clang-repl APPEND PROPERTY LINK_OPTIONS ${clang_repl_exports})
diff --git a/clang/tools/diagtool/ShowEnabledWarnings.cpp b/clang/tools/diagtool/ShowEnabledWarnings.cpp
index 0d1455d..bea0288 100644
--- a/clang/tools/diagtool/ShowEnabledWarnings.cpp
+++ b/clang/tools/diagtool/ShowEnabledWarnings.cpp
@@ -55,7 +55,6 @@ static char getCharForLevel(DiagnosticsEngine::Level Level) {
static IntrusiveRefCntPtr<DiagnosticsEngine>
createDiagnostics(unsigned int argc, char **argv) {
- IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(new DiagnosticIDs());
DiagnosticOptions DiagOpts;
// Buffer diagnostics from argument parsing so that we can output them using a
@@ -67,7 +66,8 @@ createDiagnostics(unsigned int argc, char **argv) {
Args.push_back("diagtool");
Args.append(argv, argv + argc);
CreateInvocationOptions CIOpts;
- CIOpts.Diags = new DiagnosticsEngine(DiagIDs, DiagOpts, DiagsBuffer);
+ CIOpts.Diags = llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(
+ DiagnosticIDs::create(), DiagOpts, DiagsBuffer);
std::unique_ptr<CompilerInvocation> Invocation =
createInvocation(Args, CIOpts);
if (!Invocation)
diff --git a/clang/tools/diagtool/TreeView.cpp b/clang/tools/diagtool/TreeView.cpp
index 7e47c74..13b8015 100644
--- a/clang/tools/diagtool/TreeView.cpp
+++ b/clang/tools/diagtool/TreeView.cpp
@@ -32,7 +32,7 @@ public:
static bool isIgnored(unsigned DiagID) {
// FIXME: This feels like a hack.
static DiagnosticOptions DiagOpts;
- static clang::DiagnosticsEngine Diags(new DiagnosticIDs, DiagOpts);
+ static clang::DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts);
return Diags.isIgnored(DiagID, SourceLocation());
}
diff --git a/clang/tools/driver/cc1_main.cpp b/clang/tools/driver/cc1_main.cpp
index 9b1e390..854ab3e 100644
--- a/clang/tools/driver/cc1_main.cpp
+++ b/clang/tools/driver/cc1_main.cpp
@@ -217,7 +217,7 @@ static int PrintEnabledExtensions(const TargetOptions& TargetOpts) {
int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
ensureSufficientStack();
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
// Register the support for object-file-wrapped Clang modules.
auto PCHOps = std::make_shared<PCHContainerOperations>();
diff --git a/clang/tools/driver/cc1as_main.cpp b/clang/tools/driver/cc1as_main.cpp
index f938e7e..e9243ca 100644
--- a/clang/tools/driver/cc1as_main.cpp
+++ b/clang/tools/driver/cc1as_main.cpp
@@ -662,8 +662,7 @@ int cc1as_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
TextDiagnosticPrinter *DiagClient =
new TextDiagnosticPrinter(errs(), DiagOpts);
DiagClient->setPrefix("clang -cc1as");
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- DiagnosticsEngine Diags(DiagID, DiagOpts, DiagClient);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts, DiagClient);
// Set an error handler, so that any LLVM backend diagnostics go through our
// error handler.
diff --git a/clang/tools/driver/cc1gen_reproducer_main.cpp b/clang/tools/driver/cc1gen_reproducer_main.cpp
index 8d7171e..ddff0d0 100644
--- a/clang/tools/driver/cc1gen_reproducer_main.cpp
+++ b/clang/tools/driver/cc1gen_reproducer_main.cpp
@@ -119,8 +119,8 @@ generateReproducerForInvocationArguments(ArrayRef<const char *> Argv,
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- DiagnosticsEngine Diags(DiagID, DiagOpts, new IgnoringDiagConsumer());
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
+ new IgnoringDiagConsumer());
auto VFS = llvm::vfs::getRealFileSystem();
ProcessWarningOptions(Diags, DiagOpts, *VFS, /*ReportDiags=*/false);
Driver TheDriver(ToolContext.Path, llvm::sys::getDefaultTargetTriple(), Diags,
diff --git a/clang/tools/driver/driver.cpp b/clang/tools/driver/driver.cpp
index 9ed8d4e..e5c3c4e 100644
--- a/clang/tools/driver/driver.cpp
+++ b/clang/tools/driver/driver.cpp
@@ -331,9 +331,7 @@ int clang_main(int Argc, char **Argv, const llvm::ToolContext &ToolContext) {
new TextDiagnosticPrinter(llvm::errs(), *DiagOpts);
FixupDiagPrefixExeName(DiagClient, ProgName);
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
-
- DiagnosticsEngine Diags(DiagID, *DiagOpts, DiagClient);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), *DiagOpts, DiagClient);
if (!DiagOpts->DiagnosticSerializationFile.empty()) {
auto SerializedConsumer =
diff --git a/clang/tools/libclang/CIndexCodeCompletion.cpp b/clang/tools/libclang/CIndexCodeCompletion.cpp
index 8f6729b..adac7c3 100644
--- a/clang/tools/libclang/CIndexCodeCompletion.cpp
+++ b/clang/tools/libclang/CIndexCodeCompletion.cpp
@@ -357,8 +357,8 @@ static std::atomic<unsigned> CodeCompletionResultObjects;
AllocatedCXCodeCompleteResults::AllocatedCXCodeCompleteResults(
IntrusiveRefCntPtr<FileManager> FileMgr)
: CXCodeCompleteResults(),
- Diag(new DiagnosticsEngine(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs), DiagOpts)),
+ Diag(llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(DiagnosticIDs::create(),
+ DiagOpts)),
FileMgr(std::move(FileMgr)),
SourceMgr(new SourceManager(*Diag, *this->FileMgr)),
CodeCompletionAllocator(
@@ -763,7 +763,7 @@ clang_codeCompleteAt_Impl(CXTranslationUnit TU, const char *complete_filename,
RemappedFiles, (options & CXCodeComplete_IncludeMacros),
(options & CXCodeComplete_IncludeCodePatterns),
IncludeBriefComments, Capture,
- CXXIdx->getPCHContainerOperations(), *Results->Diag,
+ CXXIdx->getPCHContainerOperations(), Results->Diag,
Results->LangOpts, *Results->SourceMgr, *Results->FileMgr,
Results->Diagnostics, Results->TemporaryBuffers,
/*SyntaxOnlyAction=*/nullptr);
diff --git a/clang/unittests/AST/ASTVectorTest.cpp b/clang/unittests/AST/ASTVectorTest.cpp
index 66003b4..03da549 100644
--- a/clang/unittests/AST/ASTVectorTest.cpp
+++ b/clang/unittests/AST/ASTVectorTest.cpp
@@ -26,14 +26,13 @@ namespace {
class ASTVectorTest : public ::testing::Test {
protected:
ASTVectorTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), Idents(LangOpts, nullptr),
Ctxt(LangOpts, SourceMgr, Idents, Sels, Builtins, TU_Complete) {}
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
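
In the unit tests the refactor also deletes state: each fixture previously carried an `IntrusiveRefCntPtr<DiagnosticIDs> DiagID` member whose only job was to feed the `DiagnosticsEngine` constructor. With the factory, the ID table is created inline in the initializer list and the member goes away, as in this trimmed sketch of the fixture shape used throughout the hunks below (gtest scaffolding assumed):

    #include "clang/Basic/Diagnostic.h"
    #include "clang/Basic/DiagnosticIDs.h"
    #include "clang/Basic/DiagnosticOptions.h"
    #include "gtest/gtest.h"

    class ExampleFixture : public ::testing::Test {
    protected:
      ExampleFixture()
          : Diags(clang::DiagnosticIDs::create(), DiagOpts,
                  new clang::IgnoringDiagConsumer()) {}

      // No IntrusiveRefCntPtr<DiagnosticIDs> member any more; the engine
      // holds the only reference it needs. Declaration order still matters:
      // DiagOpts must precede Diags so it is initialized first.
      clang::DiagnosticOptions DiagOpts;
      clang::DiagnosticsEngine Diags;
    };
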
diff --git a/clang/unittests/AST/CommentLexer.cpp b/clang/unittests/AST/CommentLexer.cpp
index dc10dae..99f4691 100644
--- a/clang/unittests/AST/CommentLexer.cpp
+++ b/clang/unittests/AST/CommentLexer.cpp
@@ -27,13 +27,12 @@ namespace {
class CommentLexerTest : public ::testing::Test {
protected:
CommentLexerTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), Traits(Allocator, CommentOptions()) {}
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
@@ -2006,4 +2005,3 @@ TEST_F(CommentLexerTest, MultipleComments) {
} // end namespace comments
} // end namespace clang
-
diff --git a/clang/unittests/AST/CommentParser.cpp b/clang/unittests/AST/CommentParser.cpp
index 67fabe5..3bd2bdb 100644
--- a/clang/unittests/AST/CommentParser.cpp
+++ b/clang/unittests/AST/CommentParser.cpp
@@ -33,13 +33,12 @@ const bool MY_DEBUG = true;
class CommentParserTest : public ::testing::Test {
protected:
CommentParserTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), Traits(Allocator, CommentOptions()) {}
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/AST/CommentTextTest.cpp b/clang/unittests/AST/CommentTextTest.cpp
index 84ec51a..675173c 100644
--- a/clang/unittests/AST/CommentTextTest.cpp
+++ b/clang/unittests/AST/CommentTextTest.cpp
@@ -44,7 +44,7 @@ protected:
// shouldn't matter.
RawComment Comment(SourceMgr, CommentRange, EmptyOpts, /*Merged=*/true);
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs, DiagOpts);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts);
return Comment.getFormattedText(SourceMgr, Diags);
}
};
diff --git a/clang/unittests/Analysis/MacroExpansionContextTest.cpp b/clang/unittests/Analysis/MacroExpansionContextTest.cpp
index 9874ea6..25a76ed 100644
--- a/clang/unittests/Analysis/MacroExpansionContextTest.cpp
+++ b/clang/unittests/Analysis/MacroExpansionContextTest.cpp
@@ -33,10 +33,10 @@ namespace {
class MacroExpansionContextTest : public ::testing::Test {
protected:
MacroExpansionContextTest()
- : InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
+ : InMemoryFileSystem(
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
FileMgr(FileSystemOptions(), InMemoryFileSystem),
- DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions()) {
TargetOpts->Triple = "x86_64-pc-linux-unknown";
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
@@ -45,7 +45,6 @@ protected:
IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/Analysis/UnsafeBufferUsageTest.cpp b/clang/unittests/Analysis/UnsafeBufferUsageTest.cpp
index 9da2c58..f795918 100644
--- a/clang/unittests/Analysis/UnsafeBufferUsageTest.cpp
+++ b/clang/unittests/Analysis/UnsafeBufferUsageTest.cpp
@@ -12,13 +12,12 @@ namespace {
class UnsafeBufferUsageTest : public ::testing::Test {
protected:
UnsafeBufferUsageTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr) {}
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
@@ -58,4 +57,4 @@ TEST_F(UnsafeBufferUsageTest, FixItHintsConflict) {
Fixes = {H1, H2, H3, MkDummyHint(2, 23) /* overlaps H1, H2, and H3 */};
EXPECT_TRUE(internal::anyConflict(Fixes, SourceMgr));
-}
\ No newline at end of file
+}
diff --git a/clang/unittests/Basic/DiagnosticTest.cpp b/clang/unittests/Basic/DiagnosticTest.cpp
index b0a034e..4b3af00 100644
--- a/clang/unittests/Basic/DiagnosticTest.cpp
+++ b/clang/unittests/Basic/DiagnosticTest.cpp
@@ -47,7 +47,7 @@ using testing::IsEmpty;
// Check that DiagnosticErrorTrap works with SuppressAllDiagnostics.
TEST(DiagnosticTest, suppressAndTrap) {
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs(), DiagOpts,
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
new IgnoringDiagConsumer());
Diags.setSuppressAllDiagnostics(true);
@@ -78,7 +78,7 @@ TEST(DiagnosticTest, suppressAndTrap) {
TEST(DiagnosticTest, fatalsAsError) {
for (unsigned FatalsAsError = 0; FatalsAsError != 2; ++FatalsAsError) {
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs(), DiagOpts,
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
new IgnoringDiagConsumer());
Diags.setFatalsAsError(FatalsAsError);
@@ -102,7 +102,7 @@ TEST(DiagnosticTest, fatalsAsError) {
TEST(DiagnosticTest, tooManyErrorsIsAlwaysFatal) {
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs(), DiagOpts,
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
new IgnoringDiagConsumer());
Diags.setFatalsAsError(true);
@@ -119,7 +119,7 @@ TEST(DiagnosticTest, tooManyErrorsIsAlwaysFatal) {
// Check that soft RESET works as intended
TEST(DiagnosticTest, softReset) {
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs(), DiagOpts,
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
new IgnoringDiagConsumer());
unsigned numWarnings = 0U, numErrors = 0U;
@@ -143,7 +143,7 @@ TEST(DiagnosticTest, softReset) {
TEST(DiagnosticTest, diagnosticError) {
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs(), DiagOpts,
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
new IgnoringDiagConsumer());
PartialDiagnostic::DiagStorageAllocator Alloc;
llvm::Expected<std::pair<int, int>> Value = DiagnosticError::create(
@@ -166,7 +166,7 @@ TEST(DiagnosticTest, diagnosticError) {
TEST(DiagnosticTest, storedDiagEmptyWarning) {
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs(), DiagOpts);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts);
class CaptureDiagnosticConsumer : public DiagnosticConsumer {
public:
@@ -197,7 +197,7 @@ protected:
llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS =
llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags{new DiagnosticIDs(), DiagOpts};
+ DiagnosticsEngine Diags{DiagnosticIDs::create(), DiagOpts};
llvm::ArrayRef<StoredDiagnostic> diags() {
return CaptureConsumer.StoredDiags;
diff --git a/clang/unittests/Basic/FileManagerTest.cpp b/clang/unittests/Basic/FileManagerTest.cpp
index 88d778f..7b3e8bc 100644
--- a/clang/unittests/Basic/FileManagerTest.cpp
+++ b/clang/unittests/Basic/FileManagerTest.cpp
@@ -454,8 +454,7 @@ TEST_F(FileManagerTest, makeAbsoluteUsesVFS) {
: StringRef("/");
llvm::sys::path::append(CustomWorkingDir, "some", "weird", "path");
- auto FS = IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem>(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
// setCurrentworkingdirectory must finish without error.
ASSERT_TRUE(!FS->setCurrentWorkingDirectory(CustomWorkingDir));
@@ -475,8 +474,7 @@ TEST_F(FileManagerTest, makeAbsoluteUsesVFS) {
TEST_F(FileManagerTest, getVirtualFileFillsRealPathName) {
SmallString<64> CustomWorkingDir = getSystemRoot();
- auto FS = IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem>(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
// setCurrentworkingdirectory must finish without error.
ASSERT_TRUE(!FS->setCurrentWorkingDirectory(CustomWorkingDir));
@@ -501,8 +499,7 @@ TEST_F(FileManagerTest, getVirtualFileFillsRealPathName) {
TEST_F(FileManagerTest, getFileDontOpenRealPath) {
SmallString<64> CustomWorkingDir = getSystemRoot();
- auto FS = IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem>(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
// setCurrentworkingdirectory must finish without error.
ASSERT_TRUE(!FS->setCurrentWorkingDirectory(CustomWorkingDir));
@@ -533,8 +530,7 @@ TEST_F(FileManagerTest, getBypassFile) {
CustomWorkingDir = "/";
#endif
- auto FS = IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem>(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
// setCurrentworkingdirectory must finish without error.
ASSERT_TRUE(!FS->setCurrentWorkingDirectory(CustomWorkingDir));
diff --git a/clang/unittests/Basic/SarifTest.cpp b/clang/unittests/Basic/SarifTest.cpp
index ad9f8ec..089b6cb 100644
--- a/clang/unittests/Basic/SarifTest.cpp
+++ b/clang/unittests/Basic/SarifTest.cpp
@@ -41,15 +41,14 @@ static std::string serializeSarifDocument(llvm::json::Object &&Doc) {
class SarifDocumentWriterTest : public ::testing::Test {
protected:
SarifDocumentWriterTest()
- : InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
+ : InMemoryFileSystem(
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
FileMgr(FileSystemOptions(), InMemoryFileSystem),
- DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr) {}
IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/Basic/SourceManagerTest.cpp b/clang/unittests/Basic/SourceManagerTest.cpp
index cbe047b..04b23dd 100644
--- a/clang/unittests/Basic/SourceManagerTest.cpp
+++ b/clang/unittests/Basic/SourceManagerTest.cpp
@@ -40,8 +40,8 @@ namespace {
class SourceManagerTest : public ::testing::Test {
protected:
SourceManagerTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
TargetOpts->Triple = "x86_64-apple-darwin11.1.0";
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
@@ -49,7 +49,6 @@ protected:
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/CodeGen/TestCompiler.h b/clang/unittests/CodeGen/TestCompiler.h
index a6fec7f..f6fada5 100644
--- a/clang/unittests/CodeGen/TestCompiler.h
+++ b/clang/unittests/CodeGen/TestCompiler.h
@@ -58,7 +58,7 @@ struct TestCompiler {
CG.reset(CreateLLVMCodeGen(
compiler.getDiagnostics(), "main-module",
- &compiler.getVirtualFileSystem(), compiler.getHeaderSearchOpts(),
+ compiler.getVirtualFileSystemPtr(), compiler.getHeaderSearchOpts(),
compiler.getPreprocessorOpts(), compiler.getCodeGenOpts(), Context));
}
diff --git a/clang/unittests/Driver/DXCModeTest.cpp b/clang/unittests/Driver/DXCModeTest.cpp
index f684593..e7d8137 100644
--- a/clang/unittests/Driver/DXCModeTest.cpp
+++ b/clang/unittests/Driver/DXCModeTest.cpp
@@ -55,17 +55,15 @@ static void validateTargetProfile(
}
TEST(DxcModeTest, TargetProfileValidation) {
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
-
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFileSystem->addFile("foo.hlsl", 0,
llvm::MemoryBuffer::getMemBuffer("\n"));
auto *DiagConsumer = new SimpleDiagnosticConsumer;
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(DiagID, DiagOpts, DiagConsumer);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts, DiagConsumer);
validateTargetProfile("-Tvs_6_0", "dxilv1.0--shadermodel6.0-vertex",
InMemoryFileSystem, Diags);
@@ -105,17 +103,15 @@ TEST(DxcModeTest, TargetProfileValidation) {
}
TEST(DxcModeTest, ValidatorVersionValidation) {
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
-
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFileSystem->addFile("foo.hlsl", 0,
llvm::MemoryBuffer::getMemBuffer("\n"));
auto *DiagConsumer = new SimpleDiagnosticConsumer;
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(DiagID, DiagOpts, DiagConsumer);
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts, DiagConsumer);
Driver TheDriver("/bin/clang", "", Diags, "", InMemoryFileSystem);
std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(
{"clang", "--driver-mode=dxc", "-Tlib_6_7", "foo.hlsl"}));
@@ -210,8 +206,8 @@ TEST(DxcModeTest, ValidatorVersionValidation) {
}
TEST(DxcModeTest, DefaultEntry) {
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFileSystem->addFile("foo.hlsl", 0,
llvm::MemoryBuffer::getMemBuffer("\n"));
diff --git a/clang/unittests/Driver/SanitizerArgsTest.cpp b/clang/unittests/Driver/SanitizerArgsTest.cpp
index b8bfc68..7847947 100644
--- a/clang/unittests/Driver/SanitizerArgsTest.cpp
+++ b/clang/unittests/Driver/SanitizerArgsTest.cpp
@@ -53,7 +53,7 @@ protected:
assert(!DriverInstance && "Running twice is not allowed");
DiagnosticOptions DiagOpts;
- DiagnosticsEngine Diags(new DiagnosticIDs, DiagOpts,
+ DiagnosticsEngine Diags(DiagnosticIDs::create(), DiagOpts,
new TextDiagnosticPrinter(llvm::errs(), DiagOpts));
DriverInstance.emplace(ClangBinary, "x86_64-unknown-linux-gnu", Diags,
"clang LLVM compiler", prepareFS(ExtraFiles));
@@ -78,8 +78,7 @@ protected:
private:
llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem>
prepareFS(llvm::ArrayRef<std::string> ExtraFiles) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS =
- new llvm::vfs::InMemoryFileSystem;
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FS->addFile(ClangBinary, time_t(), llvm::MemoryBuffer::getMemBuffer(""));
FS->addFile(InputFile, time_t(), llvm::MemoryBuffer::getMemBuffer(""));
for (llvm::StringRef F : ExtraFiles)
diff --git a/clang/unittests/Driver/SimpleDiagnosticConsumer.h b/clang/unittests/Driver/SimpleDiagnosticConsumer.h
index c3772ba..7ab409b 100644
--- a/clang/unittests/Driver/SimpleDiagnosticConsumer.h
+++ b/clang/unittests/Driver/SimpleDiagnosticConsumer.h
@@ -42,13 +42,12 @@ struct SimpleDiagnosticConsumer : public clang::DiagnosticConsumer {
// for testing situations where it will only ever be used for emitting
// diagnostics, such as being passed to `MultilibSet::select`.
inline clang::driver::Driver diagnostic_test_driver() {
- llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs> DiagID(
- new clang::DiagnosticIDs());
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
auto *DiagConsumer = new SimpleDiagnosticConsumer;
clang::DiagnosticOptions DiagOpts;
- clang::DiagnosticsEngine Diags(DiagID, DiagOpts, DiagConsumer);
+ clang::DiagnosticsEngine Diags(clang::DiagnosticIDs::create(), DiagOpts,
+ DiagConsumer);
return clang::driver::Driver("/bin/clang", "", Diags, "", InMemoryFileSystem);
}
diff --git a/clang/unittests/Driver/ToolChainTest.cpp b/clang/unittests/Driver/ToolChainTest.cpp
index 670090a..4fa2729 100644
--- a/clang/unittests/Driver/ToolChainTest.cpp
+++ b/clang/unittests/Driver/ToolChainTest.cpp
@@ -40,10 +40,10 @@ namespace {
TEST(ToolChainTest, VFSGCCInstallation) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
const char *EmptyFiles[] = {
"foo.cpp",
@@ -137,11 +137,11 @@ TEST(ToolChainTest, VFSGCCInstallation) {
TEST(ToolChainTest, VFSGCCInstallationRelativeDir) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
Driver TheDriver("/home/test/bin/clang", "arm-linux-gnueabi", Diags,
"clang LLVM compiler", InMemoryFileSystem);
@@ -176,10 +176,10 @@ TEST(ToolChainTest, VFSGCCInstallationRelativeDir) {
TEST(ToolChainTest, VFSSolarisMultiGCCInstallation) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
const char *EmptyFiles[] = {
// Sort entries so the latest version doesn't come first.
@@ -340,10 +340,10 @@ MATCHER_P(jobHasArgs, Substr, "") {
TEST(ToolChainTest, VFSGnuLibcxxPathNoSysroot) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
const char *EmptyFiles[] = {
"foo.cpp",
@@ -371,11 +371,11 @@ TEST(ToolChainTest, VFSGnuLibcxxPathNoSysroot) {
TEST(ToolChainTest, DefaultDriverMode) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
Driver CCDriver("/home/test/bin/clang", "arm-linux-gnueabi", Diags,
"clang LLVM compiler", InMemoryFileSystem);
@@ -402,7 +402,7 @@ TEST(ToolChainTest, DefaultDriverMode) {
EXPECT_TRUE(CLDriver.IsCLMode());
}
TEST(ToolChainTest, InvalidArgument) {
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
@@ -517,11 +517,11 @@ TEST(ToolChainTest, GetTargetAndMode) {
TEST(ToolChainTest, CommandOutput) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
Driver CCDriver("/home/test/bin/clang", "arm-linux-gnueabi", Diags,
"clang LLVM compiler", InMemoryFileSystem);
@@ -545,11 +545,11 @@ TEST(ToolChainTest, CommandOutput) {
TEST(ToolChainTest, PostCallback) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
// The executable path must not exist.
Driver CCDriver("/home/test/bin/clang", "arm-linux-gnueabi", Diags,
@@ -598,11 +598,11 @@ TEST(ToolChainTest, UEFICallingConventionTest) {
TEST(ToolChainTest, UEFIDefaultDebugFormatTest) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
Driver CCDriver("/home/test/bin/clang", "x86_64-unknown-uefi", Diags,
"clang LLVM compiler", InMemoryFileSystem);
CCDriver.setCheckInputsExist(false);
@@ -640,11 +640,10 @@ struct SimpleDiagnosticConsumer : public DiagnosticConsumer {
TEST(ToolChainTest, ConfigFileSearch) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
#ifdef _WIN32
const char *TestRoot = "C:\\";
@@ -717,11 +716,11 @@ struct FileSystemWithError : public llvm::vfs::FileSystem {
TEST(ToolChainTest, ConfigFileError) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
std::unique_ptr<SimpleDiagnosticConsumer> DiagConsumer(
new SimpleDiagnosticConsumer());
DiagnosticsEngine Diags(DiagID, DiagOpts, DiagConsumer.get(), false);
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS(new FileSystemWithError);
+ auto FS = llvm::makeIntrusiveRefCnt<FileSystemWithError>();
Driver TheDriver("/home/test/bin/clang", "arm-linux-gnueabi", Diags,
"clang LLVM compiler", FS);
@@ -738,12 +737,11 @@ TEST(ToolChainTest, ConfigFileError) {
TEST(ToolChainTest, BadConfigFile) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
std::unique_ptr<SimpleDiagnosticConsumer> DiagConsumer(
new SimpleDiagnosticConsumer());
DiagnosticsEngine Diags(DiagID, DiagOpts, DiagConsumer.get(), false);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
#ifdef _WIN32
const char *TestRoot = "C:\\";
@@ -812,12 +810,11 @@ TEST(ToolChainTest, BadConfigFile) {
TEST(ToolChainTest, ConfigInexistentInclude) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
std::unique_ptr<SimpleDiagnosticConsumer> DiagConsumer(
new SimpleDiagnosticConsumer());
DiagnosticsEngine Diags(DiagID, DiagOpts, DiagConsumer.get(), false);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
#ifdef _WIN32
const char *TestRoot = "C:\\";
@@ -853,12 +850,11 @@ TEST(ToolChainTest, ConfigInexistentInclude) {
TEST(ToolChainTest, ConfigRecursiveInclude) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
std::unique_ptr<SimpleDiagnosticConsumer> DiagConsumer(
new SimpleDiagnosticConsumer());
DiagnosticsEngine Diags(DiagID, DiagOpts, DiagConsumer.get(), false);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
#ifdef _WIN32
const char *TestRoot = "C:\\";
@@ -899,11 +895,10 @@ TEST(ToolChainTest, ConfigRecursiveInclude) {
TEST(ToolChainTest, NestedConfigFile) {
DiagnosticOptions DiagOpts;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID = DiagnosticIDs::create();
struct TestDiagnosticConsumer : public DiagnosticConsumer {};
DiagnosticsEngine Diags(DiagID, DiagOpts, new TestDiagnosticConsumer);
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
- new llvm::vfs::InMemoryFileSystem);
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
#ifdef _WIN32
const char *TestRoot = "C:\\";
diff --git a/clang/unittests/Frontend/ASTUnitTest.cpp b/clang/unittests/Frontend/ASTUnitTest.cpp
index afa64b5..7148ca0 100644
--- a/clang/unittests/Frontend/ASTUnitTest.cpp
+++ b/clang/unittests/Frontend/ASTUnitTest.cpp
@@ -119,8 +119,7 @@ TEST_F(ASTUnitTest, GetBufferForFileMemoryMapping) {
}
TEST_F(ASTUnitTest, ModuleTextualHeader) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFs =
- new llvm::vfs::InMemoryFileSystem();
+ auto InMemoryFs = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFs->addFile("test.cpp", 0, llvm::MemoryBuffer::getMemBuffer(R"cpp(
#include "Textual.h"
void foo() {}
diff --git a/clang/unittests/Frontend/CodeGenActionTest.cpp b/clang/unittests/Frontend/CodeGenActionTest.cpp
index 90818b7..b2792c4 100644
--- a/clang/unittests/Frontend/CodeGenActionTest.cpp
+++ b/clang/unittests/Frontend/CodeGenActionTest.cpp
@@ -76,40 +76,4 @@ TEST(CodeGenTest, CodeGenFromIRMemBuffer) {
bool Success = Compiler.ExecuteAction(Action);
EXPECT_TRUE(Success);
}
-
-TEST(CodeGenTest, DebugInfoCWDCodeGen) {
- // Check that debug info is accessing the current working directory from the
- // VFS instead of calling \p llvm::sys::fs::current_path() directly.
-
- auto Sept = llvm::sys::path::get_separator();
- auto VFS = std::make_unique<llvm::vfs::InMemoryFileSystem>();
- VFS->setCurrentWorkingDirectory(
- std::string(llvm::formatv("{0}in-memory-fs-cwd", Sept)));
- std::string TestPath =
- std::string(llvm::formatv("{0}in-memory-fs-cwd{0}test.cpp", Sept));
- VFS->addFile(TestPath, 0, llvm::MemoryBuffer::getMemBuffer("int x;\n"));
-
- auto Invocation = std::make_shared<CompilerInvocation>();
- Invocation->getFrontendOpts().Inputs.push_back(
- FrontendInputFile("test.cpp", Language::CXX));
- Invocation->getFrontendOpts().ProgramAction = EmitLLVM;
- Invocation->getTargetOpts().Triple = "x86_64-unknown-linux-gnu";
- Invocation->getCodeGenOpts().setDebugInfo(codegenoptions::FullDebugInfo);
- CompilerInstance Compiler(std::move(Invocation));
-
- SmallString<256> IRBuffer;
- Compiler.setOutputStream(std::make_unique<raw_svector_ostream>(IRBuffer));
- Compiler.createDiagnostics(*VFS);
- Compiler.createFileManager(std::move(VFS));
-
- EmitLLVMAction Action;
- bool Success = Compiler.ExecuteAction(Action);
- EXPECT_TRUE(Success);
-
- SmallString<128> RealCWD;
- llvm::sys::fs::current_path(RealCWD);
- EXPECT_TRUE(!RealCWD.empty());
- EXPECT_FALSE(IRBuffer.str().contains(RealCWD));
- EXPECT_TRUE(IRBuffer.str().contains("in-memory-fs-cwd"));
-}
}
diff --git a/clang/unittests/Frontend/CompilerInstanceTest.cpp b/clang/unittests/Frontend/CompilerInstanceTest.cpp
index 459a386..7c1b653 100644
--- a/clang/unittests/Frontend/CompilerInstanceTest.cpp
+++ b/clang/unittests/Frontend/CompilerInstanceTest.cpp
@@ -71,7 +71,7 @@ TEST(CompilerInstance, DefaultVFSOverlayFromInvocation) {
// Create a minimal CompilerInstance which should use the VFS we specified
// in the CompilerInvocation (as we don't explicitly set our own).
CompilerInstance Instance(std::move(CInvok));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
Instance.createFileManager();
// Check if the virtual file exists which means that our VFS is used by the
@@ -135,7 +135,7 @@ TEST(CompilerInstance, MultipleInputsCleansFileIDs) {
ASSERT_TRUE(CInvok) << "could not create compiler invocation";
CompilerInstance Instance(std::move(CInvok));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
Instance.createFileManager(VFS);
// Run once for `a.cc` and then for `a.h`. This makes sure we get the same
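
`CompilerInstance::setDiagnostics` likewise now accepts the `IntrusiveRefCntPtr<DiagnosticsEngine>` itself instead of the raw pointer from `.get()`, so the instance and the test share ownership of the engine. A sketch of the call-site shape under that assumption (invocation setup elided, default diagnostic consumer):

    #include "clang/Basic/DiagnosticOptions.h"
    #include "clang/Frontend/CompilerInstance.h"
    #include "llvm/Support/VirtualFileSystem.h"

    using namespace clang;

    void attachDiagnostics(CompilerInstance &Instance,
                           DiagnosticOptions &DiagOpts) {
      auto VFS = llvm::vfs::getRealFileSystem();
      llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
          CompilerInstance::createDiagnostics(*VFS, DiagOpts);
      Instance.setDiagnostics(Diags); // was: setDiagnostics(Diags.get())
      // The caller keeps a live reference, so querying the engine after the
      // instance runs an action remains valid:
      (void)Diags->hasErrorOccurred();
    }
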
diff --git a/clang/unittests/Frontend/PCHPreambleTest.cpp b/clang/unittests/Frontend/PCHPreambleTest.cpp
index faad408..d27f793 100644
--- a/clang/unittests/Frontend/PCHPreambleTest.cpp
+++ b/clang/unittests/Frontend/PCHPreambleTest.cpp
@@ -62,7 +62,7 @@ public:
void TearDown() override {}
void ResetVFS() {
- VFS = new ReadCountingInMemoryFileSystem();
+ VFS = llvm::makeIntrusiveRefCnt<ReadCountingInMemoryFileSystem>();
// We need the working directory to be set to something absolute,
// otherwise it ends up being inadvertently set to the current
// working directory in the real file system due to a series of
diff --git a/clang/unittests/Frontend/ReparseWorkingDirTest.cpp b/clang/unittests/Frontend/ReparseWorkingDirTest.cpp
index 1b8051f..38ef468 100644
--- a/clang/unittests/Frontend/ReparseWorkingDirTest.cpp
+++ b/clang/unittests/Frontend/ReparseWorkingDirTest.cpp
@@ -28,7 +28,9 @@ class ReparseWorkingDirTest : public ::testing::Test {
std::shared_ptr<PCHContainerOperations> PCHContainerOpts;
public:
- void SetUp() override { VFS = new vfs::InMemoryFileSystem(); }
+ void SetUp() override {
+ VFS = llvm::makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
+ }
void TearDown() override {}
void setWorkingDirectory(StringRef Path) {
diff --git a/clang/unittests/Frontend/SearchPathTest.cpp b/clang/unittests/Frontend/SearchPathTest.cpp
index c74a5c7..a8c16fe 100644
--- a/clang/unittests/Frontend/SearchPathTest.cpp
+++ b/clang/unittests/Frontend/SearchPathTest.cpp
@@ -40,8 +40,8 @@ namespace {
class SearchPathTest : public ::testing::Test {
protected:
SearchPathTest()
- : Diags(new DiagnosticIDs(), DiagOpts, new IgnoringDiagConsumer()),
- VFS(new llvm::vfs::InMemoryFileSystem),
+ : Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
+ VFS(llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
FileMgr(FileSystemOptions(), VFS), SourceMgr(Diags, FileMgr),
Invocation(std::make_unique<CompilerInvocation>()) {}
diff --git a/clang/unittests/Frontend/TextDiagnosticTest.cpp b/clang/unittests/Frontend/TextDiagnosticTest.cpp
index 8fd8187..622dbc5 100644
--- a/clang/unittests/Frontend/TextDiagnosticTest.cpp
+++ b/clang/unittests/Frontend/TextDiagnosticTest.cpp
@@ -36,9 +36,8 @@ TEST(TextDiagnostic, ShowLine) {
// Create dummy FileManager and SourceManager.
FileSystemOptions FSOpts;
FileManager FileMgr(FSOpts);
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs);
DiagnosticOptions DiagEngineOpts;
- DiagnosticsEngine DiagEngine(DiagID, DiagEngineOpts,
+ DiagnosticsEngine DiagEngine(DiagnosticIDs::create(), DiagEngineOpts,
new IgnoringDiagConsumer());
SourceManager SrcMgr(DiagEngine, FileMgr);
diff --git a/clang/unittests/Frontend/UtilsTest.cpp b/clang/unittests/Frontend/UtilsTest.cpp
index cf385a5..fc411e4 100644
--- a/clang/unittests/Frontend/UtilsTest.cpp
+++ b/clang/unittests/Frontend/UtilsTest.cpp
@@ -29,7 +29,7 @@ TEST(BuildCompilerInvocationTest, RecoverMultipleJobs) {
clang::DiagnosticOptions DiagOpts;
CreateInvocationOptions Opts;
Opts.RecoverOnError = true;
- Opts.VFS = new llvm::vfs::InMemoryFileSystem();
+ Opts.VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
Opts.Diags = clang::CompilerInstance::createDiagnostics(*Opts.VFS, DiagOpts,
&D, false);
std::unique_ptr<CompilerInvocation> CI = createInvocation(Args, Opts);
diff --git a/clang/unittests/Lex/HeaderSearchTest.cpp b/clang/unittests/Lex/HeaderSearchTest.cpp
index 9903c12..0213bfe 100644
--- a/clang/unittests/Lex/HeaderSearchTest.cpp
+++ b/clang/unittests/Lex/HeaderSearchTest.cpp
@@ -28,9 +28,9 @@ namespace {
class HeaderSearchTest : public ::testing::Test {
protected:
HeaderSearchTest()
- : VFS(new llvm::vfs::InMemoryFileSystem), FileMgr(FileMgrOpts, VFS),
- DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : VFS(llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
+ FileMgr(FileMgrOpts, VFS),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions),
Search(HSOpts, SourceMgr, Diags, LangOpts, Target.get()) {
TargetOpts->Triple = "x86_64-apple-darwin11.1.0";
@@ -80,7 +80,6 @@ protected:
IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS;
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/Lex/LexerTest.cpp b/clang/unittests/Lex/LexerTest.cpp
index 86df872..56d73ce 100644
--- a/clang/unittests/Lex/LexerTest.cpp
+++ b/clang/unittests/Lex/LexerTest.cpp
@@ -41,8 +41,8 @@ using testing::ElementsAre;
class LexerTest : public ::testing::Test {
protected:
LexerTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
TargetOpts->Triple = "x86_64-apple-darwin11.1.0";
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
@@ -102,7 +102,6 @@ protected:
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/Lex/ModuleDeclStateTest.cpp b/clang/unittests/Lex/ModuleDeclStateTest.cpp
index 6ecba4d..adc6cf1 100644
--- a/clang/unittests/Lex/ModuleDeclStateTest.cpp
+++ b/clang/unittests/Lex/ModuleDeclStateTest.cpp
@@ -54,8 +54,8 @@ public:
class ModuleDeclStateTest : public ::testing::Test {
protected:
ModuleDeclStateTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
TargetOpts->Triple = "x86_64-unknown-linux-gnu";
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
@@ -93,7 +93,6 @@ protected:
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/Lex/PPCallbacksTest.cpp b/clang/unittests/Lex/PPCallbacksTest.cpp
index af86c18..990689c 100644
--- a/clang/unittests/Lex/PPCallbacksTest.cpp
+++ b/clang/unittests/Lex/PPCallbacksTest.cpp
@@ -133,9 +133,10 @@ public:
class PPCallbacksTest : public ::testing::Test {
protected:
PPCallbacksTest()
- : InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
+ : InMemoryFileSystem(
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
FileMgr(FileSystemOptions(), InMemoryFileSystem),
- DiagID(new DiagnosticIDs()),
+ DiagID(DiagnosticIDs::create()),
Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions()) {
TargetOpts->Triple = "x86_64-apple-darwin11.1.0";
diff --git a/clang/unittests/Lex/PPConditionalDirectiveRecordTest.cpp b/clang/unittests/Lex/PPConditionalDirectiveRecordTest.cpp
index 54c1d02..4a88bd4 100644
--- a/clang/unittests/Lex/PPConditionalDirectiveRecordTest.cpp
+++ b/clang/unittests/Lex/PPConditionalDirectiveRecordTest.cpp
@@ -29,8 +29,8 @@ namespace {
class PPConditionalDirectiveRecordTest : public ::testing::Test {
protected:
PPConditionalDirectiveRecordTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
TargetOpts->Triple = "x86_64-apple-darwin11.1.0";
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
@@ -38,7 +38,6 @@ protected:
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/Lex/PPDependencyDirectivesTest.cpp b/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
index 061cb13..15cc283 100644
--- a/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
+++ b/clang/unittests/Lex/PPDependencyDirectivesTest.cpp
@@ -31,8 +31,8 @@ namespace {
class PPDependencyDirectivesTest : public ::testing::Test {
protected:
PPDependencyDirectivesTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
TargetOpts->Triple = "x86_64-apple-macos12";
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
@@ -40,7 +40,6 @@ protected:
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
@@ -75,7 +74,7 @@ TEST_F(PPDependencyDirectivesTest, MacroGuard) {
// "head2.h" and "head3.h" have tokens following the macro check, they should
// be included multiple times.
- auto VFS = new llvm::vfs::InMemoryFileSystem();
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
VFS->addFile(
"head1.h", 0,
llvm::MemoryBuffer::getMemBuffer("#ifndef H1_H\n#define H1_H\n#endif\n"));
diff --git a/clang/unittests/Lex/PPMemoryAllocationsTest.cpp b/clang/unittests/Lex/PPMemoryAllocationsTest.cpp
index 4d83003..f873774 100644
--- a/clang/unittests/Lex/PPMemoryAllocationsTest.cpp
+++ b/clang/unittests/Lex/PPMemoryAllocationsTest.cpp
@@ -27,8 +27,8 @@ namespace {
class PPMemoryAllocationsTest : public ::testing::Test {
protected:
PPMemoryAllocationsTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Diags(DiagID, DiagOpts, new IgnoringDiagConsumer()),
+ : FileMgr(FileMgrOpts),
+ Diags(DiagnosticIDs::create(), DiagOpts, new IgnoringDiagConsumer()),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
TargetOpts->Triple = "x86_64-apple-darwin11.1.0";
Target = TargetInfo::CreateTargetInfo(Diags, *TargetOpts);
@@ -36,7 +36,6 @@ protected:
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diags;
SourceManager SourceMgr;
diff --git a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
index 4d08f8d..44f6b04 100644
--- a/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
+++ b/clang/unittests/Parse/ParseHLSLRootSignatureTest.cpp
@@ -71,8 +71,8 @@ public:
class ParseHLSLRootSignatureTest : public ::testing::Test {
protected:
ParseHLSLRootSignatureTest()
- : FileMgr(FileMgrOpts), DiagID(new DiagnosticIDs()),
- Consumer(new ExpectedDiagConsumer()), Diags(DiagID, DiagOpts, Consumer),
+ : FileMgr(FileMgrOpts), Consumer(new ExpectedDiagConsumer()),
+ Diags(DiagnosticIDs::create(), DiagOpts, Consumer),
SourceMgr(Diags, FileMgr), TargetOpts(new TargetOptions) {
// This is an arbitrarily chosen target triple to create the target info.
TargetOpts->Triple = "dxil";
@@ -114,7 +114,6 @@ protected:
FileSystemOptions FileMgrOpts;
FileManager FileMgr;
- IntrusiveRefCntPtr<DiagnosticIDs> DiagID;
DiagnosticOptions DiagOpts;
ExpectedDiagConsumer *Consumer;
DiagnosticsEngine Diags;
diff --git a/clang/unittests/Sema/SemaNoloadLookupTest.cpp b/clang/unittests/Sema/SemaNoloadLookupTest.cpp
index 5a04f42..e565372 100644
--- a/clang/unittests/Sema/SemaNoloadLookupTest.cpp
+++ b/clang/unittests/Sema/SemaNoloadLookupTest.cpp
@@ -82,7 +82,7 @@ public:
EXPECT_TRUE(Invocation);
CompilerInstance Instance(std::move(Invocation));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
Instance.getFrontendOpts().OutputFile = CacheBMIPath;
GenerateReducedModuleInterfaceAction Action;
EXPECT_TRUE(Instance.ExecuteAction(Action));
diff --git a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp
index 970eeef..92ff76b 100644
--- a/clang/unittests/Serialization/ForceCheckFileInputTest.cpp
+++ b/clang/unittests/Serialization/ForceCheckFileInputTest.cpp
@@ -87,7 +87,7 @@ export int aa = 43;
Buf->release();
CompilerInstance Instance(std::move(Invocation));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
Instance.getFrontendOpts().OutputFile = BMIPath;
@@ -122,7 +122,7 @@ export int aa = 43;
CompilerInstance Clang(std::move(Invocation));
- Clang.setDiagnostics(Diags.get());
+ Clang.setDiagnostics(Diags);
FileManager *FM = Clang.createFileManager(CIOpts.VFS);
Clang.createSourceManager(*FM);
diff --git a/clang/unittests/Serialization/LoadSpecLazilyTest.cpp b/clang/unittests/Serialization/LoadSpecLazilyTest.cpp
index 6315474..d7b5549 100644
--- a/clang/unittests/Serialization/LoadSpecLazilyTest.cpp
+++ b/clang/unittests/Serialization/LoadSpecLazilyTest.cpp
@@ -80,7 +80,7 @@ public:
EXPECT_TRUE(Invocation);
CompilerInstance Instance(std::move(Invocation));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
Instance.getFrontendOpts().OutputFile = CacheBMIPath;
// Avoid memory leaks.
Instance.getFrontendOpts().DisableFree = false;
diff --git a/clang/unittests/Serialization/ModuleCacheTest.cpp b/clang/unittests/Serialization/ModuleCacheTest.cpp
index de6e13a..1f64401 100644
--- a/clang/unittests/Serialization/ModuleCacheTest.cpp
+++ b/clang/unittests/Serialization/ModuleCacheTest.cpp
@@ -121,7 +121,7 @@ TEST_F(ModuleCacheTest, CachedModuleNewPath) {
createInvocationAndEnableFree(Args, CIOpts);
ASSERT_TRUE(Invocation);
CompilerInstance Instance(std::move(Invocation));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
SyntaxOnlyAction Action;
ASSERT_TRUE(Instance.ExecuteAction(Action));
ASSERT_FALSE(Diags->hasErrorOccurred());
@@ -145,7 +145,7 @@ TEST_F(ModuleCacheTest, CachedModuleNewPath) {
CompilerInstance Instance2(std::move(Invocation2),
Instance.getPCHContainerOperations(),
&Instance.getModuleCache());
- Instance2.setDiagnostics(Diags.get());
+ Instance2.setDiagnostics(Diags);
SyntaxOnlyAction Action2;
ASSERT_FALSE(Instance2.ExecuteAction(Action2));
ASSERT_TRUE(Diags->hasErrorOccurred());
@@ -171,7 +171,7 @@ TEST_F(ModuleCacheTest, CachedModuleNewPathAllowErrors) {
createInvocationAndEnableFree(Args, CIOpts);
ASSERT_TRUE(Invocation);
CompilerInstance Instance(std::move(Invocation));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
SyntaxOnlyAction Action;
ASSERT_TRUE(Instance.ExecuteAction(Action));
ASSERT_FALSE(Diags->hasErrorOccurred());
@@ -189,7 +189,7 @@ TEST_F(ModuleCacheTest, CachedModuleNewPathAllowErrors) {
CompilerInstance Instance2(std::move(Invocation2),
Instance.getPCHContainerOperations(),
&Instance.getModuleCache());
- Instance2.setDiagnostics(Diags.get());
+ Instance2.setDiagnostics(Diags);
SyntaxOnlyAction Action2;
ASSERT_FALSE(Instance2.ExecuteAction(Action2));
ASSERT_TRUE(Diags->hasErrorOccurred());
diff --git a/clang/unittests/Serialization/NoCommentsTest.cpp b/clang/unittests/Serialization/NoCommentsTest.cpp
index 05efeef..ed96c7c 100644
--- a/clang/unittests/Serialization/NoCommentsTest.cpp
+++ b/clang/unittests/Serialization/NoCommentsTest.cpp
@@ -99,7 +99,7 @@ void foo() {}
ASSERT_TRUE(Invocation);
CompilerInstance Instance(std::move(Invocation));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
Instance.getFrontendOpts().OutputFile = CacheBMIPath;
GenerateReducedModuleInterfaceAction Action;
ASSERT_TRUE(Instance.ExecuteAction(Action));
diff --git a/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp b/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp
index c43520f..f9d7736 100644
--- a/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp
+++ b/clang/unittests/Serialization/PreambleInNamedModulesTest.cpp
@@ -101,7 +101,7 @@ export using ::E;
PreambleCallbacks Callbacks;
llvm::ErrorOr<PrecompiledPreamble> BuiltPreamble = PrecompiledPreamble::Build(
- *Invocation, Buffer.get(), Bounds, *Diags, VFS,
+ *Invocation, Buffer.get(), Bounds, Diags, VFS,
std::make_shared<PCHContainerOperations>(),
/*StoreInMemory=*/false, /*StoragePath=*/TestDir, Callbacks);
@@ -112,7 +112,7 @@ export using ::E;
BuiltPreamble->OverridePreamble(*Invocation, VFS, Buffer.get());
auto Clang = std::make_unique<CompilerInstance>(std::move(Invocation));
- Clang->setDiagnostics(Diags.get());
+ Clang->setDiagnostics(Diags);
if (auto VFSWithRemapping = createVFSFromCompilerInvocation(
Clang->getInvocation(), Clang->getDiagnostics(), VFS))
diff --git a/clang/unittests/Serialization/VarDeclConstantInitTest.cpp b/clang/unittests/Serialization/VarDeclConstantInitTest.cpp
index 5b2988e..743f851 100644
--- a/clang/unittests/Serialization/VarDeclConstantInitTest.cpp
+++ b/clang/unittests/Serialization/VarDeclConstantInitTest.cpp
@@ -106,7 +106,7 @@ export namespace Fibonacci
Invocation->getFrontendOpts().DisableFree = false;
CompilerInstance Instance(std::move(Invocation));
- Instance.setDiagnostics(Diags.get());
+ Instance.setDiagnostics(Diags);
std::string CacheBMIPath = llvm::Twine(TestDir + "/Cached.pcm").str();
Instance.getFrontendOpts().OutputFile = CacheBMIPath;
diff --git a/clang/unittests/Support/TimeProfilerTest.cpp b/clang/unittests/Support/TimeProfilerTest.cpp
index 85d36b5..f70149d 100644
--- a/clang/unittests/Support/TimeProfilerTest.cpp
+++ b/clang/unittests/Support/TimeProfilerTest.cpp
@@ -46,8 +46,7 @@ std::string teardownProfiler() {
bool compileFromString(StringRef Code, StringRef Standard, StringRef File,
llvm::StringMap<std::string> Headers = {}) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS(
- new llvm::vfs::InMemoryFileSystem());
+ auto FS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FS->addFile(File, 0, MemoryBuffer::getMemBuffer(Code));
for (const auto &Header : Headers) {
FS->addFile(Header.getKey(), 0,
diff --git a/clang/unittests/Tooling/CompilationDatabaseTest.cpp b/clang/unittests/Tooling/CompilationDatabaseTest.cpp
index c1febaf..2d68e09 100644
--- a/clang/unittests/Tooling/CompilationDatabaseTest.cpp
+++ b/clang/unittests/Tooling/CompilationDatabaseTest.cpp
@@ -971,7 +971,8 @@ TEST_F(TargetAndModeTest, TargetAndMode) {
class ExpandResponseFilesTest : public MemDBTest {
public:
- ExpandResponseFilesTest() : FS(new llvm::vfs::InMemoryFileSystem) {}
+ ExpandResponseFilesTest()
+ : FS(llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()) {}
protected:
void addFile(StringRef File, StringRef Content) {
diff --git a/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp b/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp
index 1cdb539..b16dd8e 100644
--- a/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp
+++ b/clang/unittests/Tooling/DependencyScanning/DependencyScannerTest.cpp
@@ -85,7 +85,7 @@ TEST(DependencyScanner, ScanDepsReuseFilemanager) {
StringRef CWD = "/root";
FixedCompilationDatabase CDB(CWD, Compilation);
- auto VFS = new llvm::vfs::InMemoryFileSystem();
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
VFS->setCurrentWorkingDirectory(CWD);
auto Sept = llvm::sys::path::get_separator();
std::string HeaderPath =
@@ -134,7 +134,7 @@ TEST(DependencyScanner, ScanDepsReuseFilemanagerSkippedFile) {
StringRef CWD = "/root";
FixedCompilationDatabase CDB(CWD, Compilation);
- auto VFS = new llvm::vfs::InMemoryFileSystem();
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
VFS->setCurrentWorkingDirectory(CWD);
auto Sept = llvm::sys::path::get_separator();
std::string HeaderPath =
@@ -176,7 +176,7 @@ TEST(DependencyScanner, ScanDepsReuseFilemanagerHasInclude) {
StringRef CWD = "/root";
FixedCompilationDatabase CDB(CWD, Compilation);
- auto VFS = new llvm::vfs::InMemoryFileSystem();
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
VFS->setCurrentWorkingDirectory(CWD);
auto Sept = llvm::sys::path::get_separator();
std::string HeaderPath =
@@ -218,7 +218,7 @@ TEST(DependencyScanner, ScanDepsWithFS) {
"test.cpp.o"};
StringRef CWD = "/root";
- auto VFS = new llvm::vfs::InMemoryFileSystem();
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
VFS->setCurrentWorkingDirectory(CWD);
auto Sept = llvm::sys::path::get_separator();
std::string HeaderPath =
@@ -256,7 +256,7 @@ TEST(DependencyScanner, ScanDepsWithModuleLookup) {
};
StringRef CWD = "/root";
- auto VFS = new llvm::vfs::InMemoryFileSystem();
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
VFS->setCurrentWorkingDirectory(CWD);
auto Sept = llvm::sys::path::get_separator();
std::string OtherPath =
@@ -306,7 +306,7 @@ TEST(DependencyScanner, ScanDepsWithModuleLookup) {
TEST(DependencyScanner, ScanDepsWithDiagConsumer) {
StringRef CWD = "/root";
- auto VFS = new llvm::vfs::InMemoryFileSystem();
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
VFS->setCurrentWorkingDirectory(CWD);
auto Sept = llvm::sys::path::get_separator();
std::string HeaderPath =
diff --git a/clang/unittests/Tooling/RefactoringTest.cpp b/clang/unittests/Tooling/RefactoringTest.cpp
index 35d1143..aff7523e 100644
--- a/clang/unittests/Tooling/RefactoringTest.cpp
+++ b/clang/unittests/Tooling/RefactoringTest.cpp
@@ -1035,8 +1035,7 @@ static constexpr bool usesWindowsPaths() {
TEST(DeduplicateByFileTest, PathsWithDots) {
std::map<std::string, Replacements> FileToReplaces;
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS(
- new llvm::vfs::InMemoryFileSystem());
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FileManager FileMgr(FileSystemOptions(), VFS);
StringRef Path1 = usesWindowsPaths() ? "a\\b\\..\\.\\c.h" : "a/b/.././c.h";
StringRef Path2 = usesWindowsPaths() ? "a\\c.h" : "a/c.h";
@@ -1051,8 +1050,7 @@ TEST(DeduplicateByFileTest, PathsWithDots) {
TEST(DeduplicateByFileTest, PathWithDotSlash) {
std::map<std::string, Replacements> FileToReplaces;
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS(
- new llvm::vfs::InMemoryFileSystem());
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FileManager FileMgr(FileSystemOptions(), VFS);
StringRef Path1 = usesWindowsPaths() ? ".\\a\\b\\c.h" : "./a/b/c.h";
StringRef Path2 = usesWindowsPaths() ? "a\\b\\c.h" : "a/b/c.h";
@@ -1067,8 +1065,7 @@ TEST(DeduplicateByFileTest, PathWithDotSlash) {
TEST(DeduplicateByFileTest, NonExistingFilePath) {
std::map<std::string, Replacements> FileToReplaces;
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS(
- new llvm::vfs::InMemoryFileSystem());
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FileManager FileMgr(FileSystemOptions(), VFS);
StringRef Path1 = usesWindowsPaths() ? ".\\a\\b\\c.h" : "./a/b/c.h";
StringRef Path2 = usesWindowsPaths() ? "a\\b\\c.h" : "a/b/c.h";
diff --git a/clang/unittests/Tooling/RewriterTestContext.h b/clang/unittests/Tooling/RewriterTestContext.h
index 2d697e2..020ef60 100644
--- a/clang/unittests/Tooling/RewriterTestContext.h
+++ b/clang/unittests/Tooling/RewriterTestContext.h
@@ -49,11 +49,12 @@ struct RewriterDiagnosticConsumer : public DiagnosticConsumer {
class RewriterTestContext {
public:
RewriterTestContext()
- : Diagnostics(IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
- DiagOpts),
- InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
+ : Diagnostics(DiagnosticIDs::create(), DiagOpts),
+ InMemoryFileSystem(
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem())),
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem())),
Files(FileSystemOptions(), OverlayFileSystem),
Sources(Diagnostics, Files), Rewrite(Sources, Options) {
Diagnostics.setClient(&DiagnosticPrinter, false);
diff --git a/clang/unittests/Tooling/Syntax/TokensTest.cpp b/clang/unittests/Tooling/Syntax/TokensTest.cpp
index b5f4445..e86793f 100644
--- a/clang/unittests/Tooling/Syntax/TokensTest.cpp
+++ b/clang/unittests/Tooling/Syntax/TokensTest.cpp
@@ -133,7 +133,7 @@ public:
CI->getPreprocessorOpts().addRemappedFile(
FileName, llvm::MemoryBuffer::getMemBufferCopy(Code).release());
CompilerInstance Compiler(std::move(CI));
- Compiler.setDiagnostics(Diags.get());
+ Compiler.setDiagnostics(Diags);
Compiler.setFileManager(FileMgr.get());
Compiler.setSourceManager(SourceMgr.get());
@@ -250,9 +250,10 @@ public:
// Data fields.
DiagnosticOptions DiagOpts;
llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
- new DiagnosticsEngine(new DiagnosticIDs, DiagOpts);
+ llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(DiagnosticIDs::create(),
+ DiagOpts);
IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS =
- new llvm::vfs::InMemoryFileSystem;
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
llvm::IntrusiveRefCntPtr<FileManager> FileMgr =
new FileManager(FileSystemOptions(), FS);
llvm::IntrusiveRefCntPtr<SourceManager> SourceMgr =
diff --git a/clang/unittests/Tooling/Syntax/TreeTestBase.cpp b/clang/unittests/Tooling/Syntax/TreeTestBase.cpp
index 9f22b1d..4a25863 100644
--- a/clang/unittests/Tooling/Syntax/TreeTestBase.cpp
+++ b/clang/unittests/Tooling/Syntax/TreeTestBase.cpp
@@ -152,7 +152,7 @@ SyntaxTreeTest::buildTree(StringRef Code, const TestClangConfig &ClangConfig) {
Invocation->getPreprocessorOpts().addRemappedFile(
FileName, llvm::MemoryBuffer::getMemBufferCopy(Code).release());
CompilerInstance Compiler(Invocation);
- Compiler.setDiagnostics(Diags.get());
+ Compiler.setDiagnostics(Diags);
Compiler.setFileManager(FileMgr.get());
Compiler.setSourceManager(SourceMgr.get());
diff --git a/clang/unittests/Tooling/Syntax/TreeTestBase.h b/clang/unittests/Tooling/Syntax/TreeTestBase.h
index 6110cff..fce89e2 100644
--- a/clang/unittests/Tooling/Syntax/TreeTestBase.h
+++ b/clang/unittests/Tooling/Syntax/TreeTestBase.h
@@ -42,9 +42,10 @@ protected:
// Data fields.
DiagnosticOptions DiagOpts;
IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
- new DiagnosticsEngine(new DiagnosticIDs, DiagOpts);
+ llvm::makeIntrusiveRefCnt<DiagnosticsEngine>(DiagnosticIDs::create(),
+ DiagOpts);
IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS =
- new llvm::vfs::InMemoryFileSystem;
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
IntrusiveRefCntPtr<FileManager> FileMgr =
new FileManager(FileSystemOptions(), FS);
IntrusiveRefCntPtr<SourceManager> SourceMgr =
diff --git a/clang/unittests/Tooling/ToolingTest.cpp b/clang/unittests/Tooling/ToolingTest.cpp
index 32af4b6..c72676f 100644
--- a/clang/unittests/Tooling/ToolingTest.cpp
+++ b/clang/unittests/Tooling/ToolingTest.cpp
@@ -153,8 +153,8 @@ TEST(buildASTFromCode, ReportsErrors) {
}
TEST(buildASTFromCode, FileSystem) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFileSystem->addFile("included_file.h", 0,
llvm::MemoryBuffer::getMemBufferCopy("class X;"));
std::unique_ptr<ASTUnit> AST = buildASTFromCodeWithArgs(
@@ -188,10 +188,11 @@ TEST(newFrontendActionFactory, CreatesFrontendActionFactoryFromFactoryType) {
}
TEST(ToolInvocation, TestMapVirtualFile) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
@@ -214,10 +215,11 @@ TEST(ToolInvocation, TestVirtualModulesCompilation) {
// mapped module.modulemap is found on the include path. In the future, expand
// this test to run a full modules enabled compilation, so we make sure we can
// rerun modules compilations with a virtual file system.
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
@@ -240,10 +242,11 @@ TEST(ToolInvocation, TestVirtualModulesCompilation) {
}
TEST(ToolInvocation, DiagnosticsEngineProperlyInitializedForCC1Construction) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
@@ -269,10 +272,11 @@ TEST(ToolInvocation, DiagnosticsEngineProperlyInitializedForCC1Construction) {
}
TEST(ToolInvocation, CustomDiagnosticOptionsOverwriteParsedOnes) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
@@ -315,10 +319,11 @@ struct DiagnosticConsumerExpectingSourceManager : public DiagnosticConsumer {
};
TEST(ToolInvocation, DiagConsumerExpectingSourceManager) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
@@ -341,10 +346,11 @@ TEST(ToolInvocation, DiagConsumerExpectingSourceManager) {
}
TEST(ToolInvocation, CC1Args) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
@@ -361,10 +367,11 @@ TEST(ToolInvocation, CC1Args) {
}
TEST(ToolInvocation, CC1ArgsInvalid) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));
@@ -398,7 +405,7 @@ struct CommandLineExtractorTest : public ::testing::Test {
public:
CommandLineExtractorTest()
- : InMemoryFS(new llvm::vfs::InMemoryFileSystem),
+ : InMemoryFS(llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
Diags(CompilerInstance::createDiagnostics(*InMemoryFS, DiagOpts)),
Driver("clang", llvm::sys::getDefaultTargetTriple(), *Diags,
"clang LLVM compiler", overlayRealFS(InMemoryFS)) {}
@@ -755,10 +762,11 @@ TEST(ClangToolTest, NoOutputCommands) {
TEST(ClangToolTest, BaseVirtualFileSystemUsage) {
FixedCompilationDatabase Compilations("/", std::vector<std::string>());
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
InMemoryFileSystem->addFile(
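The virtual-file-system churn in the Tooling tests above is the same one-expression modernization applied repeatedly: llvm::makeIntrusiveRefCnt<T>(args...) allocates T and wraps it in an IntrusiveRefCntPtr in a single step, analogous to std::make_unique. A short sketch of the resulting pattern:

  auto InMemoryFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
  auto OverlayFS = llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
      llvm::vfs::getRealFileSystem());
  OverlayFS->pushOverlay(InMemoryFS); // the overlay keeps its own reference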
diff --git a/compiler-rt/include/profile/MemProfData.inc b/compiler-rt/include/profile/MemProfData.inc
index 3f785bd..26baddd 100644
--- a/compiler-rt/include/profile/MemProfData.inc
+++ b/compiler-rt/include/profile/MemProfData.inc
@@ -33,11 +33,10 @@
(uint64_t)'o' << 24 | (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129)
// The version number of the raw binary format.
-#define MEMPROF_RAW_VERSION 4ULL
+#define MEMPROF_RAW_VERSION 5ULL
// Currently supported versions.
-#define MEMPROF_RAW_SUPPORTED_VERSIONS \
- { 3ULL, 4ULL }
+#define MEMPROF_RAW_SUPPORTED_VERSIONS {3ULL, 4ULL, 5ULL}
#define MEMPROF_V3_MIB_SIZE 132ULL;
@@ -229,6 +228,41 @@ void Merge(const MemInfoBlock &newMIB) {
} __attribute__((__packed__));
#endif
+constexpr int MantissaBits = 12;
+constexpr int ExponentBits = 4;
+constexpr uint16_t MaxMantissa = (1U << MantissaBits) - 1;
+constexpr uint16_t MaxExponent = (1U << ExponentBits) - 1;
+constexpr uint64_t MaxRepresentableValue = static_cast<uint64_t>(MaxMantissa)
+ << MaxExponent;
+
+// Encodes a 64-bit unsigned integer into a 16-bit scaled integer format.
+inline uint16_t encodeHistogramCount(uint64_t Count) {
+ if (Count == 0)
+ return 0;
+
+ if (Count > MaxRepresentableValue)
+ Count = MaxRepresentableValue;
+
+ if (Count <= MaxMantissa)
+ return Count;
+
+ uint64_t M = Count;
+ uint16_t E = 0;
+ while (M > MaxMantissa) {
+ M = (M + 1) >> 1;
+ E++;
+ }
+ return (E << MantissaBits) | static_cast<uint16_t>(M);
+}
+
+// Decodes a 16-bit scaled integer back into a 64-bit unsigned integer.
+inline uint64_t decodeHistogramCount(uint16_t EncodedValue) {
+ const uint16_t E = EncodedValue >> MantissaBits;
+ const uint16_t M = EncodedValue & MaxMantissa;
+ return static_cast<uint64_t>(M) << E;
+}
+
} // namespace memprof
} // namespace llvm
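To make the 16-bit scaled encoding concrete, here is a hand-worked example of the rounding performed by encodeHistogramCount (numbers derived from the code above, not part of the patch):

  // encodeHistogramCount(5000): 5000 > MaxMantissa (4095), so one rounding
  // halving gives M = (5000 + 1) >> 1 = 2500, E = 1; the encoded value is
  // (1 << 12) | 2500 = 0x19C4. Decoding returns 2500 << 1 = 5000, exact.
  //
  // encodeHistogramCount(1000000): eight halvings give M = 3907, E = 8;
  // decoding returns 3907 << 8 = 1000192, a relative error of about 0.02%,
  // within the 1/1024 bound asserted by the unit test added below.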
diff --git a/compiler-rt/lib/memprof/memprof_interface_internal.h b/compiler-rt/lib/memprof/memprof_interface_internal.h
index 7d3a937..1fd0748 100644
--- a/compiler-rt/lib/memprof/memprof_interface_internal.h
+++ b/compiler-rt/lib/memprof/memprof_interface_internal.h
@@ -36,8 +36,14 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __memprof_record_access(void const volatile *addr);
SANITIZER_INTERFACE_ATTRIBUTE
+void __memprof_record_access_hist(void const volatile *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __memprof_record_access_range(void const volatile *addr, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __memprof_record_access_range_hist(void const volatile *addr, uptr size);
+
SANITIZER_INTERFACE_ATTRIBUTE void __memprof_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern char
@@ -51,6 +57,10 @@ extern uptr __memprof_shadow_memory_dynamic_address;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern char
__memprof_profile_filename[1];
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern bool
+ __memprof_histogram;
+
SANITIZER_INTERFACE_ATTRIBUTE int __memprof_profile_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __memprof_profile_reset();
diff --git a/compiler-rt/lib/memprof/memprof_rawprofile.cpp b/compiler-rt/lib/memprof/memprof_rawprofile.cpp
index a897648..f579e12 100644
--- a/compiler-rt/lib/memprof/memprof_rawprofile.cpp
+++ b/compiler-rt/lib/memprof/memprof_rawprofile.cpp
@@ -7,10 +7,7 @@
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_linux.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_stackdepotbase.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_vector.h"
@@ -19,10 +16,20 @@ using ::__sanitizer::Vector;
using ::llvm::memprof::MemInfoBlock;
using SegmentEntry = ::llvm::memprof::SegmentEntry;
using Header = ::llvm::memprof::Header;
+using ::llvm::memprof::encodeHistogramCount;
namespace {
template <class T> char *WriteBytes(const T &Pod, char *Buffer) {
- *(T *)Buffer = Pod;
+  static_assert(is_trivially_copyable<T>::value,
+                "T must be trivially copyable");
+ const uint8_t *Src = reinterpret_cast<const uint8_t *>(&Pod);
+
+ for (size_t I = 0; I < sizeof(T); ++I)
+#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ // Reverse byte order since reader is little-endian.
+ Buffer[I] = Src[sizeof(T) - 1 - I];
+#else
+ Buffer[I] = Src[I];
+#endif
return Buffer + sizeof(T);
}
@@ -32,7 +39,6 @@ void RecordStackId(const uptr Key, UNUSED LockedMemInfoBlock *const &MIB,
auto *StackIds = reinterpret_cast<Vector<u64> *>(Arg);
StackIds->PushBack(Key);
}
-} // namespace
u64 SegmentSizeBytes(ArrayRef<LoadedModule> Modules) {
u64 NumSegmentsToRecord = 0;
@@ -169,18 +175,21 @@ void SerializeMIBInfoToBuffer(MIBMapTy &MIBMap, const Vector<u64> &StackIds,
// FIXME: We unnecessarily serialize the AccessHistogram pointer. Adding a
// serialization schema will fix this issue. See also FIXME in
// deserialization.
- Ptr = WriteBytes((*h)->mib, Ptr);
- for (u64 j = 0; j < (*h)->mib.AccessHistogramSize; ++j) {
- u64 HistogramEntry = ((u64 *)((*h)->mib.AccessHistogram))[j];
+ auto &MIB = (*h)->mib;
+ Ptr = WriteBytes(MIB, Ptr);
+ for (u64 j = 0; j < MIB.AccessHistogramSize; ++j) {
+ u16 HistogramEntry =
+ encodeHistogramCount(((u64 *)(MIB.AccessHistogram))[j]);
Ptr = WriteBytes(HistogramEntry, Ptr);
}
- if ((*h)->mib.AccessHistogramSize > 0) {
- InternalFree((void *)((*h)->mib.AccessHistogram));
+ if (MIB.AccessHistogramSize > 0) {
+ InternalFree((void *)MIB.AccessHistogram);
}
}
CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
"Expected num bytes != actual bytes written");
}
+} // namespace
// Format
// ---------- Header
@@ -249,7 +258,7 @@ u64 SerializeToRawProfile(MIBMapTy &MIBMap, ArrayRef<LoadedModule> Modules,
},
reinterpret_cast<void *>(&TotalAccessHistogramEntries));
const u64 NumHistogramBytes =
- RoundUpTo(TotalAccessHistogramEntries * sizeof(uint64_t), 8);
+ RoundUpTo(TotalAccessHistogramEntries * sizeof(uint16_t), 8);
const u64 NumStackBytes = RoundUpTo(StackSizeBytes(StackIds), 8);
@@ -285,5 +294,4 @@ u64 SerializeToRawProfile(MIBMapTy &MIBMap, ArrayRef<LoadedModule> Modules,
return TotalSizeBytes;
}
-
} // namespace __memprof
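A note on the WriteBytes change above: the old `*(T *)Buffer = Pod` store assumed an aligned, little-endian target; it is undefined behavior when Buffer is unaligned and writes the wrong byte order on big-endian hosts. A self-contained sketch of the portable shape (a hypothetical standalone version, assuming the little-endian on-disk layout the raw-profile reader expects):

  #include <cstddef>
  #include <cstring>
  template <class T> char *writeLE(const T &Pod, char *Buffer) {
    unsigned char Bytes[sizeof(T)];
    std::memcpy(Bytes, &Pod, sizeof(T)); // no unaligned, type-punned store
  #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    for (std::size_t I = 0; I < sizeof(T); ++I) // byte-swap on BE hosts
      Buffer[I] = static_cast<char>(Bytes[sizeof(T) - 1 - I]);
  #else
    std::memcpy(Buffer, Bytes, sizeof(T));
  #endif
    return Buffer + sizeof(T);
  }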
diff --git a/compiler-rt/lib/memprof/tests/CMakeLists.txt b/compiler-rt/lib/memprof/tests/CMakeLists.txt
index 0b5c302..1603d47 100644
--- a/compiler-rt/lib/memprof/tests/CMakeLists.txt
+++ b/compiler-rt/lib/memprof/tests/CMakeLists.txt
@@ -26,6 +26,7 @@ set(MEMPROF_SOURCES
../memprof_rawprofile.cpp)
set(MEMPROF_UNITTESTS
+ histogram_encoding.cpp
rawprofile.cpp
driver.cpp)
diff --git a/compiler-rt/lib/memprof/tests/histogram_encoding.cpp b/compiler-rt/lib/memprof/tests/histogram_encoding.cpp
new file mode 100644
index 0000000..be20595
--- /dev/null
+++ b/compiler-rt/lib/memprof/tests/histogram_encoding.cpp
@@ -0,0 +1,35 @@
+#include <cstdint>
+#include <vector>
+
+#include "profile/MemProfData.inc"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace memprof {
+namespace {
+TEST(MemProf, F16EncodeDecode) {
+ const std::vector<uint64_t> TestCases = {
+ 0, 100, 4095, 4096, 5000, 8191, 65535, 1000000, 134213640, 200000000,
+ };
+
+ for (const uint64_t TestCase : TestCases) {
+ const uint16_t Encoded = encodeHistogramCount(TestCase);
+ const uint64_t Decoded = decodeHistogramCount(Encoded);
+
+ const uint64_t MaxRepresentable = static_cast<uint64_t>(MaxMantissa)
+ << MaxExponent;
+
+ if (TestCase >= MaxRepresentable) {
+ EXPECT_EQ(Decoded, MaxRepresentable);
+ } else if (TestCase <= MaxMantissa) {
+ EXPECT_EQ(Decoded, TestCase);
+ } else {
+ // The decoded value should be close to the original value.
+      // The decoded value should be close to the original; the relative
+      // error should be less than 1/1024 of the value for larger inputs.
+ }
+ }
+}
+} // namespace
+} // namespace memprof
+} // namespace llvm
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 84fcec0..7485308 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -54,6 +54,9 @@ BASE_REQUIRED_TEMPLATE_TYPE(SecondaryT)
// Indicates possible support for Memory Tagging.
BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
+// Disable the quarantine code.
+BASE_OPTIONAL(const bool, QuarantineDisabled, false)
+
// PRIMARY_REQUIRED_TYPE(NAME)
//
// SizeClassMap to use with the Primary.
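A config that does not declare the new flag gets the default (false). A simplified sketch of the member detection that the BASE_OPTIONAL machinery performs in the wrapper below (the struct name is illustrative; the real code goes through the OPTIONAL helper templates):

  // Primary template: used when Config does not declare QuarantineDisabled.
  template <typename Config, typename = void>
  struct QuarantineDisabledOr { static constexpr bool value = false; };
  // Partial specialization: picked when the member exists.
  template <typename Config>
  struct QuarantineDisabledOr<Config,
                              decltype(void(Config::QuarantineDisabled))> {
    static constexpr bool value = Config::QuarantineDisabled;
  };
  // A config opts out of the quarantine simply by declaring:
  //   static const bool QuarantineDisabled = true;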
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
index ac639ee..5bfa700 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -60,6 +60,10 @@ template <typename AllocatorConfig> struct PrimaryConfig {
return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
}
+ static constexpr bool getQuarantineDisabled() {
+ return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+ }
+
#define PRIMARY_REQUIRED_TYPE(NAME) \
using NAME = typename AllocatorConfig::Primary::NAME;
@@ -92,6 +96,10 @@ template <typename AllocatorConfig> struct SecondaryConfig {
return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
}
+ static constexpr bool getQuarantineDisabled() {
+ return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+ }
+
#define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME) \
template <typename T> \
using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
@@ -111,6 +119,10 @@ template <typename AllocatorConfig> struct SecondaryConfig {
return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
}
+ static constexpr bool getQuarantineDisabled() {
+ return BaseConfig<AllocatorConfig>::getQuarantineDisabled();
+ }
+
#define SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT) \
OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, Cache::NAME) \
static constexpr removeConst<TYPE>::type get##NAME() { \
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 87acdec..985bfb4 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -184,9 +184,11 @@ public:
const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
Primary.init(ReleaseToOsIntervalMs);
Secondary.init(&Stats, ReleaseToOsIntervalMs);
- Quarantine.init(
- static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
- static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+ if (!AllocatorConfig::getQuarantineDisabled()) {
+ Quarantine.init(
+ static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+ static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+ }
}
void enableRingBuffer() NO_THREAD_SAFETY_ANALYSIS {
@@ -276,16 +278,20 @@ public:
// the last two items).
void commitBack(TSD<ThisT> *TSD) {
TSD->assertLocked(/*BypassCheck=*/true);
- Quarantine.drain(&TSD->getQuarantineCache(),
- QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+ if (!AllocatorConfig::getQuarantineDisabled()) {
+ Quarantine.drain(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+ }
TSD->getSizeClassAllocator().destroy(&Stats);
}
void drainCache(TSD<ThisT> *TSD) {
TSD->assertLocked(/*BypassCheck=*/true);
- Quarantine.drainAndRecycle(
- &TSD->getQuarantineCache(),
- QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+ if (!AllocatorConfig::getQuarantineDisabled()) {
+ Quarantine.drainAndRecycle(
+ &TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getSizeClassAllocator()));
+ }
TSD->getSizeClassAllocator().drain();
}
void drainCaches() { TSDRegistry.drainCaches(this); }
@@ -612,7 +618,8 @@ public:
#endif
TSDRegistry.disable();
Stats.disable();
- Quarantine.disable();
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.disable();
Primary.disable();
Secondary.disable();
disableRingBuffer();
@@ -623,7 +630,8 @@ public:
enableRingBuffer();
Secondary.enable();
Primary.enable();
- Quarantine.enable();
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.enable();
Stats.enable();
TSDRegistry.enable();
#ifdef GWP_ASAN_HOOKS
@@ -1252,7 +1260,8 @@ private:
// If the quarantine is disabled, the chunk's actual size is 0, or the size
// exceeds the maximum allowed, we return the chunk directly to the backend.
// This purposefully underflows for Size == 0.
- const bool BypassQuarantine = !Quarantine.getCacheSize() ||
+ const bool BypassQuarantine = AllocatorConfig::getQuarantineDisabled() ||
+ !Quarantine.getCacheSize() ||
((Size - 1) >= QuarantineMaxChunkSize) ||
!Header->ClassId;
if (BypassQuarantine)
@@ -1642,7 +1651,8 @@ private:
uptr getStats(ScopedString *Str) {
Primary.getStats(Str);
Secondary.getStats(Str);
- Quarantine.getStats(Str);
+ if (!AllocatorConfig::getQuarantineDisabled())
+ Quarantine.getStats(Str);
TSDRegistry.getStats(Str);
return Str->length();
}
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f04c5b7..38c9a9e 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -312,7 +312,7 @@ public:
break;
}
- if (Config::getQuarantineSize()) {
+ if (!Config::getQuarantineDisabled() && Config::getQuarantineSize()) {
QuarantinePos =
(QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
if (!Quarantine[QuarantinePos].isValid()) {
@@ -508,14 +508,16 @@ public:
void disableMemoryTagging() EXCLUDES(Mutex) {
ScopedLock L(Mutex);
- for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
- if (Quarantine[I].isValid()) {
- MemMapT &MemMap = Quarantine[I].MemMap;
- unmapCallBack(MemMap);
- Quarantine[I].invalidate();
+ if (!Config::getQuarantineDisabled()) {
+ for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
+ if (Quarantine[I].isValid()) {
+ MemMapT &MemMap = Quarantine[I].MemMap;
+ unmapCallBack(MemMap);
+ Quarantine[I].invalidate();
+ }
}
+ QuarantinePos = -1U;
}
- QuarantinePos = -1U;
for (CachedBlock &Entry : LRUEntries)
Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
@@ -575,8 +577,9 @@ private:
if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
return;
OldestTime = 0;
- for (uptr I = 0; I < Config::getQuarantineSize(); I++)
- releaseIfOlderThan(Quarantine[I], Time);
+ if (!Config::getQuarantineDisabled())
+ for (uptr I = 0; I < Config::getQuarantineSize(); I++)
+ releaseIfOlderThan(Quarantine[I], Time);
for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
releaseIfOlderThan(Entries[I], Time);
}
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 7e8d5b4..1eff9eb 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -623,20 +623,20 @@ SCUDO_TYPED_TEST(ScudoCombinedDeathTest, DisableMemoryTagging) {
SCUDO_TYPED_TEST(ScudoCombinedTest, Stats) {
auto *Allocator = this->Allocator.get();
- scudo::uptr BufferSize = 8192;
- std::vector<char> Buffer(BufferSize);
- scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
- while (ActualSize > BufferSize) {
- BufferSize = ActualSize + 1024;
- Buffer.resize(BufferSize);
- ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
+ std::string Stats(10000, '\0');
+ scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ if (ActualSize > Stats.size()) {
+ Stats.resize(ActualSize);
+ ActualSize = Allocator->getStats(Stats.data(), Stats.size());
}
- std::string Stats(Buffer.begin(), Buffer.end());
+ EXPECT_GE(Stats.size(), ActualSize);
+
// Basic checks on the contents of the statistics output, which also allows us
// to verify that we got it all.
EXPECT_NE(Stats.find("Stats: SizeClassAllocator"), std::string::npos);
EXPECT_NE(Stats.find("Stats: MapAllocator"), std::string::npos);
- EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
+ // Do not explicitly check for quarantine stats since a config can disable
+ // them. Other tests verify this (QuarantineEnabled/QuarantineDisabled).
}
SCUDO_TYPED_TEST_SKIP_THREAD_SAFETY(ScudoCombinedTest, Drain) {
@@ -1076,3 +1076,88 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
#endif
#endif
+
+struct TestQuarantineSizeClassConfig {
+ static const scudo::uptr NumBits = 1;
+ static const scudo::uptr MinSizeLog = 10;
+ static const scudo::uptr MidSizeLog = 10;
+ static const scudo::uptr MaxSizeLog = 13;
+ static const scudo::u16 MaxNumCachedHint = 8;
+ static const scudo::uptr MaxBytesCachedLog = 12;
+ static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestQuarantineConfig {
+ static const bool MaySupportMemoryTagging = false;
+
+ template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+ struct Primary {
+    // Tiny allocator; its Primary serves chunks of only four sizes.
+ using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
+ static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
+ static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ typedef scudo::uptr CompactPtrT;
+ static const scudo::uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const scudo::uptr MapSizeIncrement = 1UL << 18;
+ static const scudo::uptr GroupSizeLog = 18;
+ };
+ template <typename Config>
+ using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config>
+ using CacheT = scudo::MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+// Verify that the quarantine exists by default.
+TEST(ScudoCombinedTest, QuarantineEnabled) {
+ using AllocatorT = scudo::Allocator<TestQuarantineConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ const scudo::uptr Size = 1000U;
+ void *P = Allocator->allocate(Size, Origin);
+ EXPECT_NE(P, nullptr);
+ Allocator->deallocate(P, Origin);
+
+ std::string Stats(10000, '\0');
+ scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ if (ActualSize > Stats.size()) {
+ Stats.resize(ActualSize);
+ ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ }
+ EXPECT_GE(Stats.size(), ActualSize);
+
+ // Quarantine stats should be present.
+ EXPECT_NE(Stats.find("Stats: Quarantine"), std::string::npos);
+}
+
+struct TestQuarantineDisabledConfig : TestQuarantineConfig {
+ static const bool QuarantineDisabled = true;
+};
+
+TEST(ScudoCombinedTest, QuarantineDisabled) {
+ using AllocatorT = scudo::Allocator<TestQuarantineDisabledConfig>;
+ auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+ const scudo::uptr Size = 1000U;
+ void *P = Allocator->allocate(Size, Origin);
+ EXPECT_NE(P, nullptr);
+ Allocator->deallocate(P, Origin);
+
+ std::string Stats(10000, '\0');
+ scudo::uptr ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ if (ActualSize > Stats.size()) {
+ Stats.resize(ActualSize);
+ ActualSize = Allocator->getStats(Stats.data(), Stats.size());
+ }
+ EXPECT_GE(Stats.size(), ActualSize);
+
+  // Quarantine stats should not be present.
+ EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
+}
diff --git a/compiler-rt/test/memprof/TestCases/memprof_histogram_uint8.cpp b/compiler-rt/test/memprof/TestCases/memprof_histogram_uint8.cpp
new file mode 100644
index 0000000..ef6fb33
--- /dev/null
+++ b/compiler-rt/test/memprof/TestCases/memprof_histogram_uint8.cpp
@@ -0,0 +1,38 @@
+// Test the histogram support in memprof using the text format output.
+// Shadow memory counters per object are limited to 8 bits. In-memory counters
+// aggregating counts across multiple objects are 64 bits.
+
+// RUN: %clangxx_memprof -O0 -mllvm -memprof-histogram -mllvm -memprof-use-callbacks=true %s -o %t
+// RUN: %env_memprof_opts=print_text=1:histogram=1:log_path=stdout %run %t 2>&1 | FileCheck %s
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main() {
+ // Allocate memory that will create a histogram
+ char *buffer = (char *)malloc(1024);
+ if (!buffer)
+ return 1;
+
+ for (int i = 0; i < 10; ++i) {
+    // Access every 8th byte (shadow granularity is 8 bytes).
+ buffer[i * 8] = 'A';
+ }
+
+ for (int j = 0; j < 200; ++j) {
+    buffer[8] = 'B'; // Count becomes 1 (from the loop above) + 200 = 201.
+ }
+
+ for (int j = 0; j < 400; ++j) {
+    buffer[16] = 'B'; // Count (1 + 400) saturates at the 8-bit max, 255.
+ }
+
+ // Free the memory to trigger MIB creation with histogram
+ free(buffer);
+
+ printf("Test completed successfully\n");
+ return 0;
+}
+
+// CHECK: AccessCountHistogram[128]: 1 201 255 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+// CHECK: Test completed successfully
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/alignment-assumption.c b/compiler-rt/test/ubsan_minimal/TestCases/alignment-assumption.c
index acc3e855..134c143 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/alignment-assumption.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/alignment-assumption.c
@@ -1,4 +1,4 @@
-// RUN: %clang -fsanitize=alignment %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_min_runtime -fsanitize=alignment %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
#include <stdlib.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/icall.c b/compiler-rt/test/ubsan_minimal/TestCases/icall.c
index 6948057..a0953b8 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/icall.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/icall.c
@@ -1,5 +1,5 @@
-// RUN: %clang -fsanitize=cfi-icall -fno-sanitize-trap=cfi-icall -fuse-ld=lld -flto -fvisibility=hidden %s -o %t && not --crash %run %t 2>&1 | FileCheck %s
-// RUN: %clang -fsanitize=cfi-icall -fno-sanitize-trap=cfi-icall -fsanitize-recover=cfi-icall -fuse-ld=lld -flto -fvisibility=hidden %s -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clang_min_runtime -fsanitize=cfi-icall -fno-sanitize-trap=cfi-icall -fuse-ld=lld -flto -fvisibility=hidden %s -o %t && not --crash %run %t 2>&1 | FileCheck %s
+// RUN: %clang_min_runtime -fsanitize=cfi-icall -fno-sanitize-trap=cfi-icall -fsanitize-recover=cfi-icall -fuse-ld=lld -flto -fvisibility=hidden %s -o %t && %run %t 2>&1 | FileCheck %s
// REQUIRES: lld-available, cfi
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/implicit-integer-sign-change.c b/compiler-rt/test/ubsan_minimal/TestCases/implicit-integer-sign-change.c
index 0f1bbbf..1e3a14c 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/implicit-integer-sign-change.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/implicit-integer-sign-change.c
@@ -1,4 +1,4 @@
-// RUN: %clang -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
#include <stdint.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation-or-sign-change.c b/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation-or-sign-change.c
index e9f26dd..a05af6b 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation-or-sign-change.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation-or-sign-change.c
@@ -1,4 +1,4 @@
-// RUN: %clang -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_min_runtime -fsanitize=implicit-signed-integer-truncation,implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
#include <stdint.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation.c b/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation.c
index dc8d775..945c033 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/implicit-signed-integer-truncation.c
@@ -1,4 +1,4 @@
-// RUN: %clang -fsanitize=implicit-signed-integer-truncation %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_min_runtime -fsanitize=implicit-signed-integer-truncation %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
#include <stdint.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/implicit-unsigned-integer-truncation.c b/compiler-rt/test/ubsan_minimal/TestCases/implicit-unsigned-integer-truncation.c
index 77d38f5..35515b7 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/implicit-unsigned-integer-truncation.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/implicit-unsigned-integer-truncation.c
@@ -1,4 +1,4 @@
-// RUN: %clang -fsanitize=implicit-unsigned-integer-truncation %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
+// RUN: %clang_min_runtime -fsanitize=implicit-unsigned-integer-truncation %s -o %t && %run %t 2>&1 | FileCheck %s --check-prefixes=CHECK
#include <stdint.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/local_bounds.cpp b/compiler-rt/test/ubsan_minimal/TestCases/local_bounds.cpp
index c972e1e..4b542fa 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/local_bounds.cpp
+++ b/compiler-rt/test/ubsan_minimal/TestCases/local_bounds.cpp
@@ -1,7 +1,7 @@
-// RUN: %clangxx -fsanitize=local-bounds %s -O3 -o %t && %run %t 1
-// RUN: %clangxx -fsanitize=local-bounds %s -O3 -o %t && not --crash %run %t 3
-// RUN: %clangxx -fsanitize=local-bounds -fno-sanitize-trap=local-bounds %s -O3 -o %t && not --crash %run %t 3 2>&1 | FileCheck %s
-// RUN: %clangxx -fsanitize=local-bounds -fno-sanitize-trap=local-bounds -fsanitize-recover=local-bounds %s -O3 -o %t && %run %t 3 2>&1 | FileCheck %s
+// RUN: %clangxx_min_runtime -fsanitize=local-bounds %s -O3 -o %t && %run %t 1
+// RUN: %clangxx_min_runtime -fsanitize=local-bounds %s -O3 -o %t && not --crash %run %t 3
+// RUN: %clangxx_min_runtime -fsanitize=local-bounds -fno-sanitize-trap=local-bounds %s -O3 -o %t && not --crash %run %t 3 2>&1 | FileCheck %s
+// RUN: %clangxx_min_runtime -fsanitize=local-bounds -fno-sanitize-trap=local-bounds -fsanitize-recover=local-bounds %s -O3 -o %t && %run %t 3 2>&1 | FileCheck %s
#include <cstdlib>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/nullptr-and-nonzero-offset.c b/compiler-rt/test/ubsan_minimal/TestCases/nullptr-and-nonzero-offset.c
index bba9a38..7378a17 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/nullptr-and-nonzero-offset.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/nullptr-and-nonzero-offset.c
@@ -1,5 +1,5 @@
-// RUN: %clang -fsanitize=pointer-overflow %s -o %t && %run %t 2>&1 | FileCheck %s --implicit-check-not="pointer-overflow"
-// RUN: %clangxx -x c++ -fsanitize=pointer-overflow %s -o %t && %run %t 2>&1 | FileCheck %s --implicit-check-not="pointer-overflow"
+// RUN: %clang_min_runtime -fsanitize=pointer-overflow %s -o %t && %run %t 2>&1 | FileCheck %s --implicit-check-not="pointer-overflow"
+// RUN: %clangxx_min_runtime -x c++ -fsanitize=pointer-overflow %s -o %t && %run %t 2>&1 | FileCheck %s --implicit-check-not="pointer-overflow"
#include <stdlib.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c b/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c
index 9d326ff..aaed134 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/override-callback.c
@@ -1,6 +1,6 @@
-// RUN: %clang -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s
-// RUN: %clang -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | FileCheck %s
-// RUN: %clang -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all -DOVERRIDE=1 %s -o %t && not --crash %run %t 2>&1 | FileCheck %s --check-prefixes=FATAL
+// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change %s -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | FileCheck %s
+// RUN: %clang_min_runtime -fsanitize=implicit-integer-sign-change -fno-sanitize-recover=all -DOVERRIDE=1 %s -o %t && not --crash %run %t 2>&1 | FileCheck %s --check-prefixes=FATAL
#include <stdint.h>
#include <stdio.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup-limit.cpp b/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup-limit.cpp
index faa2b66..ebc8901 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup-limit.cpp
+++ b/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup-limit.cpp
@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsanitize=signed-integer-overflow -fsanitize-recover=all %s -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_min_runtime -fsanitize=signed-integer-overflow -fsanitize-recover=all %s -o %t && %run %t 2>&1 | FileCheck %s
#include <stdint.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup.cpp b/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup.cpp
index b7c9ddc..0b54579 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup.cpp
+++ b/compiler-rt/test/ubsan_minimal/TestCases/recover-dedup.cpp
@@ -1,4 +1,4 @@
-// RUN: %clangxx -w -fsanitize=signed-integer-overflow,nullability-return,returns-nonnull-attribute -fsanitize-recover=all %s -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_min_runtime -w -fsanitize=signed-integer-overflow,nullability-return,returns-nonnull-attribute -fsanitize-recover=all %s -o %t && %run %t 2>&1 | FileCheck %s
#include <stdint.h>
#include <stdio.h>
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/test-darwin-interface.c b/compiler-rt/test/ubsan_minimal/TestCases/test-darwin-interface.c
index 1da049f..abc1073 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/test-darwin-interface.c
+++ b/compiler-rt/test/ubsan_minimal/TestCases/test-darwin-interface.c
@@ -3,11 +3,11 @@
//
// REQUIRES: x86_64-darwin
-// RUN: nm -jgU `%clangxx -fsanitize-minimal-runtime -fsanitize=undefined %s -o %t '-###' 2>&1 | grep "libclang_rt.ubsan_minimal_osx_dynamic.dylib" | sed -e 's/.*"\(.*libclang_rt.ubsan_minimal_osx_dynamic.dylib\)".*/\1/'` | grep "^___ubsan_handle" \
+// RUN: nm -jgU `%clangxx_min_runtime -fsanitize-minimal-runtime -fsanitize=undefined %s -o %t '-###' 2>&1 | grep "libclang_rt.ubsan_minimal_osx_dynamic.dylib" | sed -e 's/.*"\(.*libclang_rt.ubsan_minimal_osx_dynamic.dylib\)".*/\1/'` | grep "^___ubsan_handle" \
// RUN: | sed 's/_minimal//g' \
// RUN: > %t.minimal.symlist
//
-// RUN: nm -jgU `%clangxx -fno-sanitize-minimal-runtime -fsanitize=undefined %s -o %t '-###' 2>&1 | grep "libclang_rt.ubsan_osx_dynamic.dylib" | sed -e 's/.*"\(.*libclang_rt.ubsan_osx_dynamic.dylib\)".*/\1/'` | grep "^___ubsan_handle" \
+// RUN: nm -jgU `%clangxx_min_runtime -fno-sanitize-minimal-runtime -fsanitize=undefined %s -o %t '-###' 2>&1 | grep "libclang_rt.ubsan_osx_dynamic.dylib" | sed -e 's/.*"\(.*libclang_rt.ubsan_osx_dynamic.dylib\)".*/\1/'` | grep "^___ubsan_handle" \
// RUN: | grep -vE "^___ubsan_handle_dynamic_type_cache_miss" \
// RUN: | grep -vE "^___ubsan_handle_cfi_bad_type" \
// RUN: | sed 's/_v1//g' \
diff --git a/compiler-rt/test/ubsan_minimal/TestCases/uadd-overflow.cpp b/compiler-rt/test/ubsan_minimal/TestCases/uadd-overflow.cpp
index 4ae081c..e1f04d3 100644
--- a/compiler-rt/test/ubsan_minimal/TestCases/uadd-overflow.cpp
+++ b/compiler-rt/test/ubsan_minimal/TestCases/uadd-overflow.cpp
@@ -1,5 +1,5 @@
-// RUN: %clangxx -fsanitize=unsigned-integer-overflow %s -o %t && %run %t 2>&1 | FileCheck %s
-// RUN: %clangxx -fsanitize=unsigned-integer-overflow -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_min_runtime -fsanitize=unsigned-integer-overflow %s -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_min_runtime -fsanitize=unsigned-integer-overflow -fno-sanitize-recover=all %s -o %t && not --crash %run %t 2>&1 | FileCheck %s
#include <stdint.h>
diff --git a/compiler-rt/test/ubsan_minimal/lit.common.cfg.py b/compiler-rt/test/ubsan_minimal/lit.common.cfg.py
index bcc0e46..33473e2 100644
--- a/compiler-rt/test/ubsan_minimal/lit.common.cfg.py
+++ b/compiler-rt/test/ubsan_minimal/lit.common.cfg.py
@@ -27,9 +27,13 @@ target_cflags = [get_required_attr(config, "target_cflags")]
clang_ubsan_cflags = ["-fsanitize-minimal-runtime"] + target_cflags
clang_ubsan_cxxflags = config.cxx_mode_flags + clang_ubsan_cflags
-# Define %clang and %clangxx substitutions to use in test RUN lines.
-config.substitutions.append(("%clang ", build_invocation(clang_ubsan_cflags)))
-config.substitutions.append(("%clangxx ", build_invocation(clang_ubsan_cxxflags)))
+# Define %clang_min_runtime and %clangxx_min_runtime substitutions to use in test RUN lines.
+config.substitutions.append(
+ ("%clang_min_runtime ", build_invocation(clang_ubsan_cflags))
+)
+config.substitutions.append(
+ ("%clangxx_min_runtime ", build_invocation(clang_ubsan_cxxflags))
+)
# Default test suffixes.
config.suffixes = [".c", ".cpp"]
diff --git a/cross-project-tests/CMakeLists.txt b/cross-project-tests/CMakeLists.txt
index b4b1f47..192db87 100644
--- a/cross-project-tests/CMakeLists.txt
+++ b/cross-project-tests/CMakeLists.txt
@@ -19,11 +19,12 @@ set(CROSS_PROJECT_TEST_DEPS
FileCheck
check-gdb-llvm-support
count
- llvm-dwarfdump
+ llvm-ar
llvm-config
+ llvm-dwarfdump
llvm-objdump
- split-file
not
+ split-file
)
if ("clang" IN_LIST LLVM_ENABLE_PROJECTS)
diff --git a/cross-project-tests/dtlto/ld-archive-thin.test b/cross-project-tests/dtlto/ld-archive-thin.test
new file mode 100644
index 0000000..979da54
--- /dev/null
+++ b/cross-project-tests/dtlto/ld-archive-thin.test
@@ -0,0 +1,97 @@
+REQUIRES: ld.lld,llvm-ar
+
+## Test that a DTLTO link succeeds and outputs the expected set of files
+## correctly when thin archives are present.
+
+RUN: rm -rf %t && split-file %s %t && cd %t
+
+## Compile bitcode. -O2 is required for cross-module importing.
+RUN: %clang -O2 --target=x86_64-linux-gnu -flto=thin -c \
+RUN: foo.c bar.c dog.c cat.c start.c
+
+## Generate thin archives.
+RUN: llvm-ar rcs foo.a foo.o --thin
+## Create this bitcode thin archive in a subdirectory to test the expansion of
+## the path to a bitcode file that is referenced using "..", e.g., in this case
+## "../bar.o".
+RUN: mkdir lib
+RUN: llvm-ar rcs lib/bar.a bar.o --thin
+## Create this bitcode thin archive with an absolute path entry containing "..".
+RUN: llvm-ar rcs dog.a %t/lib/../dog.o --thin
+## The bitcode member of cat.a will not be used in the link.
+RUN: llvm-ar rcs cat.a cat.o --thin
+RUN: llvm-ar rcs start.a start.o --thin
+
+## Link from a different directory to ensure that thin archive member paths are
+## resolved correctly relative to the archive locations.
+RUN: mkdir %t/out && cd %t/out
+
+RUN: %clang --target=x86_64-linux-gnu -flto=thin -fuse-ld=lld %t/foo.a %t/lib/bar.a ../start.a %t/cat.a \
+RUN: -Wl,--whole-archive ../dog.a \
+RUN: -fthinlto-distributor=%python \
+RUN: -Xthinlto-distributor=%llvm_src_root/utils/dtlto/local.py \
+RUN: -Wl,--save-temps -nostdlib -Werror
+
+## Check that the required output files have been created.
+RUN: ls | sort | FileCheck %s
+
+## No files are expected before.
+CHECK-NOT: {{.}}
+
+## JSON jobs description.
+CHECK: {{^}}a.[[PID:[a-zA-Z0-9_]+]].dist-file.json{{$}}
+
+## Native output object files and individual summary index files.
+CHECK: {{^}}bar.3.[[PID]].native.o{{$}}
+CHECK: {{^}}bar.3.[[PID]].native.o.thinlto.bc{{$}}
+CHECK: {{^}}dog.1.[[PID]].native.o{{$}}
+CHECK: {{^}}dog.1.[[PID]].native.o.thinlto.bc{{$}}
+CHECK: {{^}}foo.2.[[PID]].native.o{{$}}
+CHECK: {{^}}foo.2.[[PID]].native.o.thinlto.bc{{$}}
+CHECK: {{^}}start.4.[[PID]].native.o{{$}}
+CHECK: {{^}}start.4.[[PID]].native.o.thinlto.bc{{$}}
+
+## No files are expected after.
+CHECK-NOT: {{.}}
+
+
+## It is important that cross-module importing occurs in this test, to show
+## that Clang can successfully load the bitcode file dependencies recorded in
+## the summary indices. Explicitly check that the expected importing occurred.
+
+RUN: llvm-dis start.4.*.native.o.thinlto.bc -o - | \
+RUN: FileCheck %s --check-prefixes=FOO,BAR,START
+
+RUN: llvm-dis dog.1.*.native.o.thinlto.bc -o - | \
+RUN: FileCheck %s --check-prefixes=FOO,BAR,DOG,START
+
+RUN: llvm-dis foo.2.*.native.o.thinlto.bc -o - | \
+RUN: FileCheck %s --check-prefixes=FOO,BAR,START
+
+RUN: llvm-dis bar.3.*.native.o.thinlto.bc -o - | \
+RUN: FileCheck %s --check-prefixes=FOO,BAR,START
+
+FOO-DAG: foo.o
+BAR-DAG: bar.o
+DOG-DAG: dog.o
+START-DAG: start.o
+
+
+#--- foo.c
+extern int bar(int), _start(int);
+__attribute__((retain)) int foo(int x) { return x + bar(x) + _start(x); }
+
+#--- bar.c
+extern int foo(int), _start(int);
+__attribute__((retain)) int bar(int x) { return x + foo(x) + _start(x); }
+
+#--- dog.c
+extern int foo(int), bar(int), _start(int);
+__attribute__((retain)) int dog(int x) { return x + foo(x) + bar(x) + _start(x); }
+
+#--- cat.c
+__attribute__((retain)) void cat(int x) {}
+
+#--- start.c
+extern int foo(int), bar(int);
+__attribute__((retain)) int _start(int x) { return x + foo(x) + bar(x); }
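
The member paths above exercise both relative entries ("../bar.o" via lib/bar.a) and absolute entries containing "..". Conceptually, thin-archive members resolve against the directory of the archive itself; a minimal C++ sketch of that resolution using LLVM's path utilities ("resolveThinMember" is a hypothetical helper, not the actual LLD code):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"

    static std::string resolveThinMember(llvm::StringRef archivePath,
                                         llvm::StringRef memberPath) {
      llvm::SmallString<256> result;
      if (llvm::sys::path::is_absolute(memberPath)) {
        result.assign(memberPath); // e.g. "<tmp>/lib/../dog.o"
      } else {
        // Relative members are relative to the archive's directory.
        result.assign(llvm::sys::path::parent_path(archivePath));
        llvm::sys::path::append(result, memberPath);
      }
      // Collapse "." and ".." components, as this test exercises.
      llvm::sys::path::remove_dots(result, /*remove_dot_dot=*/true);
      return std::string(result);
    }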
diff --git a/cross-project-tests/lit.cfg.py b/cross-project-tests/lit.cfg.py
index 6a902bc..a3af729 100644
--- a/cross-project-tests/lit.cfg.py
+++ b/cross-project-tests/lit.cfg.py
@@ -107,6 +107,8 @@ lldb_path = llvm_config.use_llvm_tool("lldb", search_env="LLDB")
if lldb_path is not None:
config.available_features.add("lldb")
+if llvm_config.use_llvm_tool("llvm-ar"):
+ config.available_features.add("llvm-ar")
def configure_dexter_substitutions():
"""Configure substitutions for host platform and return list of dependencies"""
diff --git a/flang-rt/include/flang-rt/runtime/format.h b/flang-rt/include/flang-rt/runtime/format.h
index 9469cb0..89f815f 100644
--- a/flang-rt/include/flang-rt/runtime/format.h
+++ b/flang-rt/include/flang-rt/runtime/format.h
@@ -36,6 +36,14 @@ enum EditingFlags {
};
struct MutableModes {
+ // Handle DC or DECIMAL='COMMA' and determine the active separator character
+ constexpr RT_API_ATTRS char32_t GetSeparatorChar() const {
+ return editingFlags & decimalComma ? char32_t{';'} : char32_t{','};
+ }
+ constexpr RT_API_ATTRS char32_t GetRadixPointChar() const {
+ return editingFlags & decimalComma ? char32_t{','} : char32_t{'.'};
+ }
+
std::uint8_t editingFlags{0}; // BN, DP, SS
enum decimal::FortranRounding round{
executionEnvironment
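
These two helpers centralize a decision that was previously re-derived at each use site in the runtime (see the edit-input.cpp changes below). Their behavior, following directly from the definitions above:

    // Illustration only.
    MutableModes modes;
    modes.GetSeparatorChar();  // ',' by default
    modes.GetRadixPointChar(); // '.' by default
    modes.editingFlags |= decimalComma; // DC / DECIMAL='COMMA' in effect
    modes.GetSeparatorChar();  // now ';'
    modes.GetRadixPointChar(); // now ','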
diff --git a/flang-rt/include/flang-rt/runtime/io-stmt.h b/flang-rt/include/flang-rt/runtime/io-stmt.h
index 95b2ee7..1d680d7 100644
--- a/flang-rt/include/flang-rt/runtime/io-stmt.h
+++ b/flang-rt/include/flang-rt/runtime/io-stmt.h
@@ -856,7 +856,7 @@ private:
};
class InquireIOLengthState : public NoUnitIoStatementState,
- public IoDirectionState<Direction::Output> {
+ public OutputStatementState {
public:
RT_API_ATTRS InquireIOLengthState(
const char *sourceFile = nullptr, int sourceLine = 0);
diff --git a/flang-rt/lib/runtime/edit-input.cpp b/flang-rt/lib/runtime/edit-input.cpp
index 0c2a4cc..3a8abf3 100644
--- a/flang-rt/lib/runtime/edit-input.cpp
+++ b/flang-rt/lib/runtime/edit-input.cpp
@@ -19,14 +19,10 @@
namespace Fortran::runtime::io {
RT_OFFLOAD_API_GROUP_BEGIN
-// Handle DC or DECIMAL='COMMA' and determine the active separator character
-static inline RT_API_ATTRS char32_t GetSeparatorChar(const DataEdit &edit) {
- return edit.modes.editingFlags & decimalComma ? char32_t{';'} : char32_t{','};
-}
-
static inline RT_API_ATTRS bool IsCharValueSeparator(
const DataEdit &edit, char32_t ch) {
- return ch == ' ' || ch == '\t' || ch == '/' || ch == GetSeparatorChar(edit) ||
+ return ch == ' ' || ch == '\t' || ch == '/' ||
+ ch == edit.modes.GetSeparatorChar() ||
(edit.IsNamelist() && (ch == '&' || ch == '$'));
}
@@ -68,7 +64,7 @@ static RT_API_ATTRS bool EditBOZInput(
// Count significant digits after any leading white space & zeroes
int digits{0};
int significantBits{0};
- const char32_t comma{GetSeparatorChar(edit)};
+ char32_t comma{edit.modes.GetSeparatorChar()};
for (; next; next = io.NextInField(remaining, edit)) {
char32_t ch{*next};
if (ch == ' ' || ch == '\t') {
@@ -156,10 +152,6 @@ static RT_API_ATTRS bool EditBOZInput(
return CheckCompleteListDirectedField(io, edit);
}
-static inline RT_API_ATTRS char32_t GetRadixPointChar(const DataEdit &edit) {
- return edit.modes.editingFlags & decimalComma ? char32_t{','} : char32_t{'.'};
-}
-
// Prepares input from a field, and returns the sign, if any, else '\0'.
static RT_API_ATTRS char ScanNumericPrefix(IoStatementState &io,
const DataEdit &edit, Fortran::common::optional<char32_t> &next,
@@ -221,7 +213,7 @@ RT_API_ATTRS bool EditIntegerInput(IoStatementState &io, const DataEdit &edit,
common::uint128_t value{0};
bool any{!!sign};
bool overflow{false};
- const char32_t comma{GetSeparatorChar(edit)};
+ char32_t comma{edit.modes.GetSeparatorChar()};
static constexpr auto maxu128{~common::uint128_t{0}};
for (; next; next = io.NextInField(remaining, edit, &fastField)) {
char32_t ch{*next};
@@ -238,7 +230,7 @@ RT_API_ATTRS bool EditIntegerInput(IoStatementState &io, const DataEdit &edit,
} else if (ch == comma) {
break; // end non-list-directed field early
} else {
- if (edit.modes.inNamelist && ch == GetRadixPointChar(edit)) {
+ if (edit.modes.inNamelist && ch == edit.modes.GetRadixPointChar()) {
// Ignore any fractional part that might appear in NAMELIST integer
// input, like a few other Fortran compilers do.
// TODO: also process exponents? Some compilers do, but they obviously
@@ -344,7 +336,7 @@ static RT_API_ATTRS ScannedRealInput ScanRealInput(
}
bool bzMode{(edit.modes.editingFlags & blankZero) != 0};
int exponent{0};
- const char32_t comma{GetSeparatorChar(edit)};
+ char32_t comma{edit.modes.GetSeparatorChar()};
if (!next || (!bzMode && *next == ' ') || *next == comma) {
if (!edit.IsListDirected() && !io.GetConnectionState().IsAtEOF()) {
// An empty/blank field means zero when not list-directed.
@@ -355,7 +347,7 @@ static RT_API_ATTRS ScannedRealInput ScanRealInput(
}
return {got, exponent, false};
}
- char32_t radixPointChar{GetRadixPointChar(edit)};
+ char32_t radixPointChar{edit.modes.GetRadixPointChar()};
char32_t first{*next >= 'a' && *next <= 'z' ? *next + 'A' - 'a' : *next};
bool isHexadecimal{false};
if (first == 'N' || first == 'I') {
@@ -518,7 +510,7 @@ static RT_API_ATTRS ScannedRealInput ScanRealInput(
} else if (radixPointOffset) {
exponent += *radixPointOffset;
} else {
- // When no redix point (or comma) appears in the value, the 'd'
+ // When no radix point (or comma) appears in the value, the 'd'
// part of the edit descriptor must be interpreted as the number of
// digits in the value to be interpreted as being to the *right* of
// the assumed radix point (13.7.2.3.2)
@@ -959,10 +951,12 @@ RT_API_ATTRS bool EditLogicalInput(
"Bad character '%lc' in LOGICAL input field", *next);
return false;
}
- if (remaining) { // ignore the rest of a fixed-width field
- io.HandleRelativePosition(*remaining);
- } else if (edit.descriptor == DataEdit::ListDirected) {
- while (io.NextInField(remaining, edit)) { // discard rest of field
+ if (remaining || edit.descriptor == DataEdit::ListDirected) {
+ // Ignore the rest of the input field; stop after separator when
+ // not list-directed.
+ char32_t comma{edit.modes.GetSeparatorChar()};
+ while (next && *next != comma) {
+ next = io.NextInField(remaining, edit);
}
}
return CheckCompleteListDirectedField(io, edit);
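
With this change a fixed-width LOGICAL field, like the numeric fields above, is consumed up to the active separator instead of producing an invalid-character error. A hedged fragment at the I/O API level, using the same entry points the new unit tests below exercise:

    // Sketch; cf. flang-rt/unittests/Runtime/InputExtensions.cpp below.
    auto cookie{IONAME(BeginInternalFormattedInput)("12,34,", 6, "(2I4)", 5)};
    std::int64_t a, b;
    IONAME(InputInteger)(cookie, a); // comma ends the first field early: 12
    IONAME(InputInteger)(cookie, b); // 34
    IONAME(EndIoStatement)(cookie);  // status 0, not an error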
diff --git a/flang-rt/lib/runtime/io-stmt.cpp b/flang-rt/lib/runtime/io-stmt.cpp
index 5667d67..36bffd4 100644
--- a/flang-rt/lib/runtime/io-stmt.cpp
+++ b/flang-rt/lib/runtime/io-stmt.cpp
@@ -839,10 +839,7 @@ ListDirectedStatementState<Direction::Input>::GetNextDataEdit(
edit.descriptor = DataEdit::ListDirectedNullValue;
return edit;
}
- char32_t comma{','};
- if (edit.modes.editingFlags & decimalComma) {
- comma = ';';
- }
+ const char32_t comma{edit.modes.GetSeparatorChar()};
std::size_t byteCount{0};
if (remaining_ > 0 && !realPart_) { // "r*c" repetition in progress
RUNTIME_CHECK(io.GetIoErrorHandler(), repeatPosition_.has_value());
diff --git a/flang-rt/lib/runtime/namelist.cpp b/flang-rt/lib/runtime/namelist.cpp
index 1bef387..2325ca1 100644
--- a/flang-rt/lib/runtime/namelist.cpp
+++ b/flang-rt/lib/runtime/namelist.cpp
@@ -27,8 +27,7 @@ RT_VAR_GROUP_END
RT_OFFLOAD_API_GROUP_BEGIN
static inline RT_API_ATTRS char32_t GetComma(IoStatementState &io) {
- return io.mutableModes().editingFlags & decimalComma ? char32_t{';'}
- : char32_t{','};
+ return io.mutableModes().GetSeparatorChar();
}
bool IODEF(OutputNamelist)(Cookie cookie, const NamelistGroup &group) {
diff --git a/flang-rt/lib/runtime/unit.h b/flang-rt/lib/runtime/unit.h
index 9aec9b1..f266a48 100644
--- a/flang-rt/lib/runtime/unit.h
+++ b/flang-rt/lib/runtime/unit.h
@@ -161,9 +161,6 @@ public:
lock_.Take();
#endif
A &state{u_.emplace<A>(std::forward<X>(xs)...)};
- if constexpr (!std::is_same_v<A, OpenStatementState>) {
- state.mutableModes() = ConnectionState::modes;
- }
directAccessRecWasSet_ = false;
io_.emplace(state);
return *io_;
diff --git a/flang-rt/unittests/Runtime/CMakeLists.txt b/flang-rt/unittests/Runtime/CMakeLists.txt
index 49f55a4..cf1e15d 100644
--- a/flang-rt/unittests/Runtime/CMakeLists.txt
+++ b/flang-rt/unittests/Runtime/CMakeLists.txt
@@ -19,6 +19,7 @@ add_flangrt_unittest(RuntimeTests
Derived.cpp
ExternalIOTest.cpp
Format.cpp
+ InputExtensions.cpp
Inquiry.cpp
ListInputTest.cpp
LogicalFormatTest.cpp
diff --git a/flang-rt/unittests/Runtime/InputExtensions.cpp b/flang-rt/unittests/Runtime/InputExtensions.cpp
new file mode 100644
index 0000000..4bb1124
--- /dev/null
+++ b/flang-rt/unittests/Runtime/InputExtensions.cpp
@@ -0,0 +1,106 @@
+//===-- unittests/Runtime/InputExtensions.cpp -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CrashHandlerFixture.h"
+#include "flang-rt/runtime/descriptor.h"
+#include "flang/Runtime/io-api.h"
+#include <algorithm>
+#include <array>
+#include <cstring>
+#include <gtest/gtest.h>
+#include <tuple>
+
+using namespace Fortran::runtime;
+using namespace Fortran::runtime::io;
+
+struct InputExtensionTests : CrashHandlerFixture {};
+
+TEST(InputExtensionTests, SeparatorInField_F) {
+ static const struct {
+ int get;
+ const char *format, *data;
+ double expect[3];
+ } test[] = {
+ {2, "(2F6)", "1.25,3.75,", {1.25, 3.75}},
+ {2, "(2F6)", "1.25 ,3.75 ,", {1.25, 3.75}},
+ {2, "(DC,2F6)", "1,25;3,75;", {1.25, 3.75}},
+ {2, "(DC,2F6)", "1,25 ;3,75 ;", {1.25, 3.75}},
+ };
+ for (std::size_t j{0}; j < sizeof test / sizeof *test; ++j) {
+ auto cookie{IONAME(BeginInternalFormattedInput)(test[j].data,
+ std::strlen(test[j].data), test[j].format,
+ std::strlen(test[j].format))};
+ for (int k{0}; k < test[j].get; ++k) {
+ float got;
+ IONAME(InputReal32)(cookie, got);
+ ASSERT_EQ(got, test[j].expect[k])
+ << "expected " << test[j].expect[k] << ", got " << got;
+ }
+ auto status{IONAME(EndIoStatement)(cookie)};
+ ASSERT_EQ(status, 0) << "error status " << status << " on F test case "
+ << j;
+ }
+}
+
+TEST(InputExtensionTests, SeparatorInField_I) {
+ static const struct {
+ int get;
+ const char *format, *data;
+ std::int64_t expect[3];
+ } test[] = {
+ {2, "(2I4)", "12,34,", {12, 34}},
+ {2, "(2I4)", "12 ,34 ,", {12, 34}},
+ {2, "(DC,2I4)", "12;34;", {12, 34}},
+ {2, "(DC,2I4)", "12 ;34 ;", {12, 34}},
+ };
+ for (std::size_t j{0}; j < sizeof test / sizeof *test; ++j) {
+ auto cookie{IONAME(BeginInternalFormattedInput)(test[j].data,
+ std::strlen(test[j].data), test[j].format,
+ std::strlen(test[j].format))};
+ for (int k{0}; k < test[j].get; ++k) {
+ std::int64_t got;
+ IONAME(InputInteger)(cookie, got);
+ ASSERT_EQ(got, test[j].expect[k])
+ << "expected " << test[j].expect[k] << ", got " << got;
+ }
+ auto status{IONAME(EndIoStatement)(cookie)};
+ ASSERT_EQ(status, 0) << "error status " << status << " on I test case "
+ << j;
+ }
+}
+
+TEST(InputExtensionTests, SeparatorInField_L) {
+ static const struct {
+ int get;
+ const char *format, *data;
+ bool expect[3];
+ } test[] = {
+ {2, "(2L4)", ".T,F,", {true, false}},
+ {2, "(2L4)", ".F,T,", {false, true}},
+ {2, "(2L4)", ".T.,F,", {true, false}},
+ {2, "(2L4)", ".F.,T,", {false, true}},
+ {2, "(DC,2L4)", ".T;F,", {true, false}},
+ {2, "(DC,2L4)", ".F;T,", {false, true}},
+ {2, "(DC,2L4)", ".T.;F,", {true, false}},
+ {2, "(DC,2L4)", ".F.;T,", {false, true}},
+ };
+ for (std::size_t j{0}; j < sizeof test / sizeof *test; ++j) {
+ auto cookie{IONAME(BeginInternalFormattedInput)(test[j].data,
+ std::strlen(test[j].data), test[j].format,
+ std::strlen(test[j].format))};
+ for (int k{0}; k < test[j].get; ++k) {
+ bool got;
+ IONAME(InputLogical)(cookie, got);
+ ASSERT_EQ(got, test[j].expect[k])
+ << "expected " << test[j].expect[k] << ", got " << got;
+ }
+ auto status{IONAME(EndIoStatement)(cookie)};
+ ASSERT_EQ(status, 0) << "error status " << status << " on L test case "
+ << j;
+ }
+}
diff --git a/flang/docs/Extensions.md b/flang/docs/Extensions.md
index c167a55..d697842 100644
--- a/flang/docs/Extensions.md
+++ b/flang/docs/Extensions.md
@@ -420,8 +420,9 @@ end
* A `NAMELIST` input group may omit its trailing `/` character if
it is followed by another `NAMELIST` input group.
* A `NAMELIST` input group may begin with either `&` or `$`.
-* A comma in a fixed-width numeric input field terminates the
- field rather than signaling an invalid character error.
+* A comma (or semicolon in `DECIMAL='COMMA'` or `DC` mode) in a
+ fixed-width numeric input field terminates the field rather than
+ signaling an invalid character error.
* Arguments to the intrinsic functions `MAX` and `MIN` are converted
when necessary to the type of the result.
An `OPTIONAL`, `POINTER`, or `ALLOCATABLE` argument after
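
Under DECIMAL='COMMA' the roles swap: ';' separates fields and ',' is the radix point. A hedged fragment mirroring one of the runtime unit-test cases added above:

    // Sketch; matches a case in the InputExtensions.cpp tests.
    auto cookie{IONAME(BeginInternalFormattedInput)("1,25;3,75;", 10,
                                                    "(DC,2F6)", 8)};
    float x, y;
    IONAME(InputReal32)(cookie, x); // "1,25;" -> 1.25; ';' ends the field
    IONAME(InputReal32)(cookie, y); // 3.75
    IONAME(EndIoStatement)(cookie);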
diff --git a/flang/docs/FortranStandardsSupport.md b/flang/docs/FortranStandardsSupport.md
index f54c65b..dc273fb 100644
--- a/flang/docs/FortranStandardsSupport.md
+++ b/flang/docs/FortranStandardsSupport.md
@@ -40,12 +40,12 @@ status of all important Fortran 2023 features. The table entries are based on th
| Conditional expressions and arguments | N | |
| More use of boz constants | P | All usages other than enum are supported |
| Intrinsics for extracting tokens from a string | N | |
-| Intrinsics for Trig functions that work in degrees | N | |
-| Intrinsics for Trig functions that work in half revolutions| N | |
+| Intrinsics for Trig functions that work in degrees | Y | |
+| Intrinsics for Trig functions that work in half revolutions| Y | |
| Changes to system_clock | N | |
| Changes for conformance with the new IEEE standard | Y | |
| Additional named constants to specify kinds | Y | |
-| Extensions for c_f_pointer intrinsic | N | |
+| Extensions for c_f_pointer intrinsic | Y | |
| Procedures for converting between fortran and c strings | N | |
| The at edit descriptor | N | |
| Control over leading zeros in output of real values | N | |
diff --git a/flang/examples/FeatureList/FeatureList.cpp b/flang/examples/FeatureList/FeatureList.cpp
index e9aeed1..64b57b6 100644
--- a/flang/examples/FeatureList/FeatureList.cpp
+++ b/flang/examples/FeatureList/FeatureList.cpp
@@ -529,7 +529,6 @@ public:
READ_FEATURE(OmpChunkModifier::Value)
READ_FEATURE(OmpOrderingModifier)
READ_FEATURE(OmpOrderingModifier::Value)
- READ_FEATURE(OmpSectionBlocks)
READ_FEATURE(OmpSectionsDirective)
READ_FEATURE(Only)
READ_FEATURE(OpenACCAtomicConstruct)
diff --git a/flang/examples/FlangOmpReport/FlangOmpReportVisitor.cpp b/flang/examples/FlangOmpReport/FlangOmpReportVisitor.cpp
index feb7b4e..5c64870 100644
--- a/flang/examples/FlangOmpReport/FlangOmpReportVisitor.cpp
+++ b/flang/examples/FlangOmpReport/FlangOmpReportVisitor.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "FlangOmpReportVisitor.h"
+#include "flang/Parser/openmp-utils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Frontend/OpenMP/OMP.h"
@@ -118,60 +119,8 @@ std::string OpenMPCounterVisitor::getName(const OpenMPDeclarativeConstruct &c) {
c.u);
}
std::string OpenMPCounterVisitor::getName(const OpenMPConstruct &c) {
- return std::visit(
- Fortran::common::visitors{
- [&](const OpenMPStandaloneConstruct &c) -> std::string {
- return common::visit(
- common::visitors{
- [&](const OmpMetadirectiveDirective &d) {
- return normalize_construct_name(d.source.ToString());
- },
- [&](auto &&d) {
- const CharBlock &source{
- std::get<OmpDirectiveName>(d.v.t).source};
- return normalize_construct_name(source.ToString());
- },
- },
- c.u);
- },
- [&](const OpenMPExecutableAllocate &c) -> std::string {
- const CharBlock &source{std::get<0>(c.t).source};
- return normalize_construct_name(source.ToString());
- },
- [&](const OpenMPDeclarativeAllocate &c) -> std::string {
- const CharBlock &source{std::get<0>(c.t).source};
- return normalize_construct_name(source.ToString());
- },
- [&](const OpenMPAssumeConstruct &c) -> std::string {
- const CharBlock &source{std::get<0>(c.t).source};
- return normalize_construct_name(source.ToString());
- },
- [&](const OpenMPAllocatorsConstruct &c) -> std::string {
- const CharBlock &source{std::get<0>(c.t).source};
- return normalize_construct_name(source.ToString());
- },
- [&](const OpenMPAtomicConstruct &c) -> std::string {
- auto &dirSpec = std::get<OmpDirectiveSpecification>(c.t);
- auto &dirName = std::get<OmpDirectiveName>(dirSpec.t);
- return normalize_construct_name(dirName.source.ToString());
- },
- [&](const OpenMPUtilityConstruct &c) -> std::string {
- const CharBlock &source{c.source};
- return normalize_construct_name(source.ToString());
- },
- [&](const OpenMPSectionConstruct &c) -> std::string {
- return "section";
- },
- // OpenMPSectionsConstruct, OpenMPLoopConstruct,
- // OpenMPBlockConstruct, OpenMPCriticalConstruct Get the source from
- // the directive field of the begin directive or from the verbatim
- // field of the begin directive in Critical
- [&](const auto &c) -> std::string {
- const CharBlock &source{std::get<0>(std::get<0>(c.t).t).source};
- return normalize_construct_name(source.ToString());
- },
- },
- c.u);
+ return normalize_construct_name(
+ omp::GetOmpDirectiveName(c).source.ToString());
}
bool OpenMPCounterVisitor::Pre(const OpenMPDeclarativeConstruct &c) {
diff --git a/flang/include/flang/Parser/dump-parse-tree.h b/flang/include/flang/Parser/dump-parse-tree.h
index 23e35d1..9141443 100644
--- a/flang/include/flang/Parser/dump-parse-tree.h
+++ b/flang/include/flang/Parser/dump-parse-tree.h
@@ -581,6 +581,8 @@ public:
NODE(parser, OmpDependClause)
NODE(OmpDependClause, TaskDep)
NODE(OmpDependClause::TaskDep, Modifier)
+ NODE(parser, OmpAutomapModifier)
+ NODE_ENUM(OmpAutomapModifier, Value)
NODE(parser, OmpDetachClause)
NODE(parser, OmpDoacrossClause)
NODE(parser, OmpDestroyClause)
@@ -588,6 +590,8 @@ public:
NODE(parser, OmpEndCriticalDirective)
NODE(parser, OmpEndLoopDirective)
NODE(parser, OmpEndSectionsDirective)
+ NODE(parser, OmpEnterClause)
+ NODE(OmpEnterClause, Modifier)
NODE(parser, OmpFailClause)
NODE(parser, OmpFromClause)
NODE(OmpFromClause, Modifier)
@@ -681,7 +685,6 @@ public:
NODE_ENUM(OmpChunkModifier, Value)
NODE(parser, OmpOrderingModifier)
NODE_ENUM(OmpOrderingModifier, Value)
- NODE(parser, OmpSectionBlocks)
NODE(parser, OmpSectionsDirective)
NODE(parser, OmpToClause)
NODE(OmpToClause, Modifier)
diff --git a/flang/include/flang/Parser/openmp-utils.h b/flang/include/flang/Parser/openmp-utils.h
index 579ea7d..41c0442 100644
--- a/flang/include/flang/Parser/openmp-utils.h
+++ b/flang/include/flang/Parser/openmp-utils.h
@@ -78,6 +78,14 @@ struct DirectiveNameScope {
return MakeName(dir.source, dir.v);
}
+ static OmpDirectiveName GetOmpDirectiveName(const OpenMPSectionConstruct &x) {
+ if (auto &spec{std::get<std::optional<OmpDirectiveSpecification>>(x.t)}) {
+ return spec->DirName();
+ } else {
+ return MakeName({}, llvm::omp::Directive::OMPD_section);
+ }
+ }
+
static OmpDirectiveName GetOmpDirectiveName(
const OmpBeginSectionsDirective &x) {
auto &dir{std::get<OmpSectionsDirective>(x.t)};
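
One consequence of the fall-back branch above: when the first SECTION is implicit (no "!$omp section" written), the synthesized name carries an empty CharBlock. Callers must tolerate that, as DataSharingProcessor::getSource does later in this patch; a sketch with a hypothetical sectionConstruct:

    parser::OmpDirectiveName name{
        DirectiveNameScope::GetOmpDirectiveName(sectionConstruct)};
    if (name.source.empty()) {
      // Implicit "!$omp section": there is no source location to report.
    }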
diff --git a/flang/include/flang/Parser/parse-tree.h b/flang/include/flang/Parser/parse-tree.h
index 3a28f6f..137552f 100644
--- a/flang/include/flang/Parser/parse-tree.h
+++ b/flang/include/flang/Parser/parse-tree.h
@@ -3469,6 +3469,12 @@ WRAPPER_CLASS(PauseStmt, std::optional<StopCode>);
// --- Common definitions
+#define INHERITED_WRAPPER_CLASS_BOILERPLATE(classname, basename) \
+ BOILERPLATE(classname); \
+ using basename::basename; \
+ classname(basename &&base) : basename(std::move(base)) {} \
+ using WrapperTrait = std::true_type
+
struct OmpClause;
struct OmpDirectiveSpecification;
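
For reference, a sketch of what this macro expands to at its single use below (OmpDirectiveNameModifier); BOILERPLATE contributes the usual defaulted/move members:

    struct OmpDirectiveNameModifier : public OmpDirectiveName {
      BOILERPLATE(OmpDirectiveNameModifier);    // default/move boilerplate
      using OmpDirectiveName::OmpDirectiveName; // inherit constructors
      OmpDirectiveNameModifier(OmpDirectiveName &&base)
          : OmpDirectiveName(std::move(base)) {}
      using WrapperTrait = std::true_type; // treated as a wrapper class
    };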
@@ -3476,6 +3482,7 @@ struct OmpDirectiveName {
// No boilerplates: this class should be copyable, movable, etc.
constexpr OmpDirectiveName() = default;
constexpr OmpDirectiveName(const OmpDirectiveName &) = default;
+ constexpr OmpDirectiveName(llvm::omp::Directive x) : v(x) {}
// Construct from an already parsed text. Use Verbatim for this because
// Verbatim's source corresponds to an actual source location.
// This allows "construct<OmpDirectiveName>(Verbatim("<name>"))".
@@ -3769,6 +3776,16 @@ struct OmpAlwaysModifier {
WRAPPER_CLASS_BOILERPLATE(OmpAlwaysModifier, Value);
};
+// Ref: [6.0:289-290]
+//
+// automap-modifier ->
+// automap // since 6.0
+//
+struct OmpAutomapModifier {
+ ENUM_CLASS(Value, Automap);
+ WRAPPER_CLASS_BOILERPLATE(OmpAutomapModifier, Value);
+};
+
// Ref: [5.2:252-254]
//
// chunk-modifier ->
@@ -3848,7 +3865,10 @@ struct OmpDeviceModifier {
// [*] The IF clause is allowed on CANCEL in OpenMP 4.5, but only without
// the directive-name-modifier. For the sake of uniformity CANCEL can be
// considered a valid value in 4.5 as well.
-using OmpDirectiveNameModifier = OmpDirectiveName;
+struct OmpDirectiveNameModifier : public OmpDirectiveName {
+ INHERITED_WRAPPER_CLASS_BOILERPLATE(
+ OmpDirectiveNameModifier, OmpDirectiveName);
+};
// Ref: [5.1:205-209], [5.2:166-168]
//
@@ -4350,6 +4370,17 @@ struct OmpDeviceTypeClause {
WRAPPER_CLASS_BOILERPLATE(OmpDeviceTypeClause, DeviceTypeDescription);
};
+// Ref: [5.2:158-159], [6.0:289-290]
+//
+// enter-clause ->
+// ENTER(locator-list) |
+// ENTER(automap-modifier: locator-list) // since 6.0
+struct OmpEnterClause {
+ TUPLE_CLASS_BOILERPLATE(OmpEnterClause);
+ MODIFIER_BOILERPLATE(OmpAutomapModifier);
+ std::tuple<MODIFIERS(), OmpObjectList> t;
+};
+
// OMP 5.2 15.8.3 extended-atomic, fail-clause ->
// FAIL(memory-order)
struct OmpFailClause {
@@ -4808,18 +4839,17 @@ struct OmpEndSectionsDirective {
// structured-block]
// ...
struct OpenMPSectionConstruct {
- WRAPPER_CLASS_BOILERPLATE(OpenMPSectionConstruct, Block);
+ TUPLE_CLASS_BOILERPLATE(OpenMPSectionConstruct);
+ std::tuple<std::optional<OmpDirectiveSpecification>, Block> t;
CharBlock source;
};
-// `OmpSectionBlocks` is a list of section constructs. The parser guarentees
-// that the `OpenMPConstruct` here always encapsulates an
-// `OpenMPSectionConstruct` and not any other OpenMP construct.
-WRAPPER_CLASS(OmpSectionBlocks, std::list<OpenMPConstruct>);
-
struct OpenMPSectionsConstruct {
TUPLE_CLASS_BOILERPLATE(OpenMPSectionsConstruct);
- std::tuple<OmpBeginSectionsDirective, OmpSectionBlocks,
+ CharBlock source;
+ // Each of the OpenMPConstructs in the list below contains an
+ // OpenMPSectionConstruct. This is guaranteed by the parser.
+ std::tuple<OmpBeginSectionsDirective, std::list<OpenMPConstruct>,
OmpEndSectionsDirective>
t;
};
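
Consumers read the restructured nodes as sketched below (cf. the check-omp-structure.cpp and unparse.cpp changes later in this patch); the optional directive specification is present only for an explicit "!$omp section":

    const auto &sec{std::get<parser::OpenMPSectionConstruct>(construct.u)};
    if (auto &spec{std::get<std::optional<parser::OmpDirectiveSpecification>>(
            sec.t)}) {
      // Explicit "!$omp section"; spec->DirName() names the directive.
    }
    const parser::Block &body{std::get<parser::Block>(sec.t)};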
diff --git a/flang/include/flang/Semantics/openmp-modifiers.h b/flang/include/flang/Semantics/openmp-modifiers.h
index a9fe911..e0eae98 100644
--- a/flang/include/flang/Semantics/openmp-modifiers.h
+++ b/flang/include/flang/Semantics/openmp-modifiers.h
@@ -72,6 +72,7 @@ DECLARE_DESCRIPTOR(parser::OmpAlignModifier);
DECLARE_DESCRIPTOR(parser::OmpAllocatorComplexModifier);
DECLARE_DESCRIPTOR(parser::OmpAllocatorSimpleModifier);
DECLARE_DESCRIPTOR(parser::OmpAlwaysModifier);
+DECLARE_DESCRIPTOR(parser::OmpAutomapModifier);
DECLARE_DESCRIPTOR(parser::OmpChunkModifier);
DECLARE_DESCRIPTOR(parser::OmpCloseModifier);
DECLARE_DESCRIPTOR(parser::OmpContextSelector);
diff --git a/flang/include/flang/Semantics/semantics.h b/flang/include/flang/Semantics/semantics.h
index 0dbca51..12220cc 100644
--- a/flang/include/flang/Semantics/semantics.h
+++ b/flang/include/flang/Semantics/semantics.h
@@ -162,7 +162,6 @@ public:
warningsAreErrors_ = x;
return *this;
}
-
SemanticsContext &set_debugModuleWriter(bool x) {
debugModuleWriter_ = x;
return *this;
diff --git a/flang/lib/Frontend/CompilerInstance.cpp b/flang/lib/Frontend/CompilerInstance.cpp
index 2e0f91f..cd8ddda 100644
--- a/flang/lib/Frontend/CompilerInstance.cpp
+++ b/flang/lib/Frontend/CompilerInstance.cpp
@@ -233,10 +233,8 @@ clang::IntrusiveRefCntPtr<clang::DiagnosticsEngine>
CompilerInstance::createDiagnostics(clang::DiagnosticOptions &opts,
clang::DiagnosticConsumer *client,
bool shouldOwnClient) {
- clang::IntrusiveRefCntPtr<clang::DiagnosticIDs> diagID(
- new clang::DiagnosticIDs());
- clang::IntrusiveRefCntPtr<clang::DiagnosticsEngine> diags(
- new clang::DiagnosticsEngine(diagID, opts));
+ auto diags = llvm::makeIntrusiveRefCnt<clang::DiagnosticsEngine>(
+ clang::DiagnosticIDs::create(), opts);
// Create the diagnostic client for reporting errors or for
// implementing -verify.
diff --git a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
index 594f95e..8eabf4f 100644
--- a/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/ClauseProcessor.cpp
@@ -1519,10 +1519,14 @@ bool ClauseProcessor::processTo(
bool ClauseProcessor::processEnter(
llvm::SmallVectorImpl<DeclareTargetCapturePair> &result) const {
return findRepeatableClause<omp::clause::Enter>(
- [&](const omp::clause::Enter &clause, const parser::CharBlock &) {
+ [&](const omp::clause::Enter &clause, const parser::CharBlock &source) {
+ mlir::Location currentLocation = converter.genLocation(source);
+ if (std::get<std::optional<omp::clause::Enter::Modifier>>(clause.t))
+ TODO(currentLocation, "Declare target enter AUTOMAP modifier");
// Case: declare target enter(func, var1, var2)...
- gatherFuncAndVarSyms(
- clause.v, mlir::omp::DeclareTargetCaptureClause::enter, result);
+ gatherFuncAndVarSyms(std::get<ObjectList>(clause.t),
+ mlir::omp::DeclareTargetCaptureClause::enter,
+ result);
});
}
diff --git a/flang/lib/Lower/OpenMP/Clauses.cpp b/flang/lib/Lower/OpenMP/Clauses.cpp
index 686fba0..7f75aae 100644
--- a/flang/lib/Lower/OpenMP/Clauses.cpp
+++ b/flang/lib/Lower/OpenMP/Clauses.cpp
@@ -772,8 +772,19 @@ Doacross make(const parser::OmpClause::Doacross &inp,
Enter make(const parser::OmpClause::Enter &inp,
semantics::SemanticsContext &semaCtx) {
- // inp.v -> parser::OmpObjectList
- return Enter{makeObjects(/*List=*/inp.v, semaCtx)};
+ // inp.v -> parser::OmpEnterClause
+ CLAUSET_ENUM_CONVERT( //
+ convert, parser::OmpAutomapModifier::Value, Enter::Modifier,
+ // clang-format off
+ MS(Automap, Automap)
+ // clang-format on
+ );
+ auto &mods = semantics::OmpGetModifiers(inp.v);
+ auto *mod = semantics::OmpGetUniqueModifier<parser::OmpAutomapModifier>(mods);
+ auto &objList = std::get<parser::OmpObjectList>(inp.v.t);
+
+ return Enter{{/*Modifier=*/maybeApplyToV(convert, mod),
+ /*List=*/makeObjects(objList, semaCtx)}};
}
Exclusive make(const parser::OmpClause::Exclusive &inp,
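
CLAUSET_ENUM_CONVERT (defined alongside the clause templates in llvm/Frontend/OpenMP/ClauseT.h) builds a local enum-to-enum converter from the MS(...) pairs; roughly, as a sketch rather than the literal expansion:

    auto convert{[](parser::OmpAutomapModifier::Value v) {
      switch (v) {
      case parser::OmpAutomapModifier::Value::Automap:
        return Enter::Modifier::Automap;
      }
      llvm_unreachable("Unexpected enumerator");
    }};
    // maybeApplyToV applies it to the optional modifier's value, yielding
    // std::nullopt when no AUTOMAP modifier was written.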
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
index 2ac4d95..2c0cbb2 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
@@ -389,42 +389,16 @@ void DataSharingProcessor::insertLastPrivateCompare(mlir::Operation *op) {
}
}
-static const parser::CharBlock *
-getSource(const semantics::SemanticsContext &semaCtx,
- const lower::pft::Evaluation &eval) {
- const parser::CharBlock *source = nullptr;
-
- auto ompConsVisit = [&](const parser::OpenMPConstruct &x) {
- std::visit(
- common::visitors{
- [&](const parser::OpenMPSectionsConstruct &x) {
- source = &std::get<0>(x.t).source;
- },
- [&](const parser::OpenMPLoopConstruct &x) {
- source = &std::get<0>(x.t).source;
- },
- [&](const parser::OpenMPBlockConstruct &x) {
- source = &std::get<0>(x.t).source;
- },
- [&](const parser::OpenMPCriticalConstruct &x) {
- source = &std::get<0>(x.t).source;
- },
- [&](const parser::OpenMPAtomicConstruct &x) {
- source = &std::get<parser::OmpDirectiveSpecification>(x.t).source;
- },
- [&](const auto &x) { source = &x.source; },
- },
- x.u);
- };
-
- eval.visit(common::visitors{
- [&](const parser::OpenMPConstruct &x) { ompConsVisit(x); },
- [&](const parser::OpenMPDeclarativeConstruct &x) { source = &x.source; },
- [&](const parser::OmpEndLoopDirective &x) { source = &x.source; },
- [&](const auto &x) {},
+static parser::CharBlock getSource(const semantics::SemanticsContext &semaCtx,
+ const lower::pft::Evaluation &eval) {
+ return eval.visit(common::visitors{
+ [&](const parser::OpenMPConstruct &x) {
+ return parser::omp::GetOmpDirectiveName(x).source;
+ },
+ [&](const parser::OpenMPDeclarativeConstruct &x) { return x.source; },
+ [&](const parser::OmpEndLoopDirective &x) { return x.source; },
+ [&](const auto &x) { return parser::CharBlock{}; },
});
-
- return source;
}
static void collectPrivatizingConstructs(
@@ -518,11 +492,11 @@ void DataSharingProcessor::collectSymbols(
for (const semantics::Scope &child : scope->children())
collectScopes(&child);
};
- const parser::CharBlock *source =
- clauses.empty() ? getSource(semaCtx, eval) : &clauses.front().source;
+ parser::CharBlock source =
+ clauses.empty() ? getSource(semaCtx, eval) : clauses.front().source;
const semantics::Scope *curScope = nullptr;
- if (source && !source->empty()) {
- curScope = &semaCtx.FindScope(*source);
+ if (!source.empty()) {
+ curScope = &semaCtx.FindScope(source);
collectScopes(curScope);
}
// Collect all symbols referenced in the evaluation being processed,
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index 6a4ec77..575658f 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -2332,7 +2332,7 @@ genSectionsOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
assert(sectionsConstruct && "Missing additional parsing information");
const auto &sectionBlocks =
- std::get<parser::OmpSectionBlocks>(sectionsConstruct->t);
+ std::get<std::list<parser::OpenMPConstruct>>(sectionsConstruct->t);
mlir::omp::SectionsOperands clauseOps;
llvm::SmallVector<const semantics::Symbol *> reductionSyms;
genSectionsClauses(converter, semaCtx, item->clauses, loc, clauseOps,
@@ -2385,7 +2385,7 @@ genSectionsOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
// because we need to run genReductionVars on each omp.section so that the
// reduction variable gets mapped to the private version
for (auto [construct, nestedEval] :
- llvm::zip(sectionBlocks.v, eval.getNestedEvaluations())) {
+ llvm::zip(sectionBlocks, eval.getNestedEvaluations())) {
const auto *sectionConstruct =
std::get_if<parser::OpenMPSectionConstruct>(&construct.u);
if (!sectionConstruct) {
diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp
index 1c62614..9b73dc8 100644
--- a/flang/lib/Parser/openmp-parsers.cpp
+++ b/flang/lib/Parser/openmp-parsers.cpp
@@ -34,6 +34,39 @@ namespace Fortran::parser {
constexpr auto startOmpLine = skipStuffBeforeStatement >> "!$OMP "_sptok;
constexpr auto endOmpLine = space >> endOfLine;
+// Given a parser for a single element, and a parser for a list of elements
+// of the same type, create a parser that constructs the entire list by having
+// the single element be the head of the list, and the rest be the tail.
+template <typename ParserH, typename ParserT> struct ConsParser {
+ static_assert(std::is_same_v<std::list<typename ParserH::resultType>,
+ typename ParserT::resultType>);
+
+ using resultType = typename ParserT::resultType;
+ constexpr ConsParser(ParserH h, ParserT t) : head_(h), tail_(t) {}
+
+ std::optional<resultType> Parse(ParseState &state) const {
+ if (auto &&first{head_.Parse(state)}) {
+ if (auto rest{tail_.Parse(state)}) {
+ rest->push_front(std::move(*first));
+ return std::move(*rest);
+ }
+ }
+ return std::nullopt;
+ }
+
+private:
+ const ParserH head_;
+ const ParserT tail_;
+};
+
+template <typename ParserH, typename ParserT,
+ typename ValueH = typename ParserH::resultType,
+ typename ValueT = typename ParserT::resultType,
+ typename = std::enable_if_t<std::is_same_v<std::list<ValueH>, ValueT>>>
+constexpr auto cons(ParserH head, ParserT tail) {
+ return ConsParser<ParserH, ParserT>(head, tail);
+}
+
// Given a parser P for a wrapper class, invoke P, and if it succeeds return
// the wrapped object.
template <typename Parser> struct UnwrapParser {
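
The SECTIONS parser below is the only client of cons(). Behaviorally, cons(p, many(p)) is a "one or more" parser that returns its results in source order:

    // Control flow of ConsParser<H, T>::Parse, in brief:
    //   1. run head_; on failure yield std::nullopt (no partial list);
    //   2. run tail_ to obtain std::list<T>;
    //   3. push_front the head's value, preserving element order.
    // Hence, e.g. (hypothetical element parser "sectionParser"):
    //   constexpr auto sections{cons(sectionParser, many(sectionParser))};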
@@ -449,6 +482,9 @@ TYPE_PARSER(construct<OmpAllocatorSimpleModifier>(scalarIntExpr))
TYPE_PARSER(construct<OmpAlwaysModifier>( //
"ALWAYS" >> pure(OmpAlwaysModifier::Value::Always)))
+TYPE_PARSER(construct<OmpAutomapModifier>(
+ "AUTOMAP" >> pure(OmpAutomapModifier::Value::Automap)))
+
TYPE_PARSER(construct<OmpChunkModifier>( //
"SIMD" >> pure(OmpChunkModifier::Value::Simd)))
@@ -466,6 +502,8 @@ TYPE_PARSER(construct<OmpDeviceModifier>(
"ANCESTOR" >> pure(OmpDeviceModifier::Value::Ancestor) ||
"DEVICE_NUM" >> pure(OmpDeviceModifier::Value::Device_Num)))
+TYPE_PARSER(construct<OmpDirectiveNameModifier>(OmpDirectiveNameParser{}))
+
TYPE_PARSER(construct<OmpExpectation>( //
"PRESENT" >> pure(OmpExpectation::Value::Present)))
@@ -601,6 +639,9 @@ TYPE_PARSER(sourced(construct<OmpDependClause::TaskDep::Modifier>(sourced(
TYPE_PARSER(
sourced(construct<OmpDeviceClause::Modifier>(Parser<OmpDeviceModifier>{})))
+TYPE_PARSER(
+ sourced(construct<OmpEnterClause::Modifier>(Parser<OmpAutomapModifier>{})))
+
TYPE_PARSER(sourced(construct<OmpFromClause::Modifier>(
sourced(construct<OmpFromClause::Modifier>(Parser<OmpExpectation>{}) ||
construct<OmpFromClause::Modifier>(Parser<OmpMapper>{}) ||
@@ -609,7 +650,8 @@ TYPE_PARSER(sourced(construct<OmpFromClause::Modifier>(
TYPE_PARSER(sourced(
construct<OmpGrainsizeClause::Modifier>(Parser<OmpPrescriptiveness>{})))
-TYPE_PARSER(sourced(construct<OmpIfClause::Modifier>(OmpDirectiveNameParser{})))
+TYPE_PARSER(sourced(
+ construct<OmpIfClause::Modifier>(Parser<OmpDirectiveNameModifier>{})))
TYPE_PARSER(sourced(
construct<OmpInitClause::Modifier>(
@@ -735,6 +777,10 @@ TYPE_PARSER(construct<OmpDefaultClause>(
Parser<OmpDefaultClause::DataSharingAttribute>{}) ||
construct<OmpDefaultClause>(indirect(Parser<OmpDirectiveSpecification>{}))))
+TYPE_PARSER(construct<OmpEnterClause>(
+ maybe(nonemptyList(Parser<OmpEnterClause::Modifier>{}) / ":"),
+ Parser<OmpObjectList>{}))
+
TYPE_PARSER(construct<OmpFailClause>(
"ACQ_REL" >> pure(common::OmpMemoryOrderType::Acq_Rel) ||
"ACQUIRE" >> pure(common::OmpMemoryOrderType::Acquire) ||
@@ -1023,7 +1069,7 @@ TYPE_PARSER( //
"DYNAMIC_ALLOCATORS" >>
construct<OmpClause>(construct<OmpClause::DynamicAllocators>()) ||
"ENTER" >> construct<OmpClause>(construct<OmpClause::Enter>(
- parenthesized(Parser<OmpObjectList>{}))) ||
+ parenthesized(Parser<OmpEnterClause>{}))) ||
"EXCLUSIVE" >> construct<OmpClause>(construct<OmpClause::Exclusive>(
parenthesized(Parser<OmpObjectList>{}))) ||
"FAIL" >> construct<OmpClause>(construct<OmpClause::Fail>(
@@ -1828,19 +1874,20 @@ TYPE_PARSER(
sourced("END"_tok >> Parser<OmpSectionsDirective>{}),
Parser<OmpClauseList>{})))
-// OMP SECTION-BLOCK
-
-TYPE_PARSER(construct<OpenMPSectionConstruct>(block))
-
-TYPE_PARSER(maybe(startOmpLine >> "SECTION"_tok / endOmpLine) >>
- construct<OmpSectionBlocks>(nonemptySeparated(
- construct<OpenMPConstruct>(sourced(Parser<OpenMPSectionConstruct>{})),
- startOmpLine >> "SECTION"_tok / endOmpLine)))
+static constexpr auto sectionDir{
+ startOmpLine >> (predicated(OmpDirectiveNameParser{},
+ IsDirective(llvm::omp::Directive::OMPD_section)) >=
+ Parser<OmpDirectiveSpecification>{})};
// OMP SECTIONS (OpenMP 5.0 - 2.8.1), PARALLEL SECTIONS (OpenMP 5.0 - 2.13.3)
-TYPE_PARSER(construct<OpenMPSectionsConstruct>(
+TYPE_PARSER(sourced(construct<OpenMPSectionsConstruct>(
Parser<OmpBeginSectionsDirective>{} / endOmpLine,
- Parser<OmpSectionBlocks>{}, Parser<OmpEndSectionsDirective>{} / endOmpLine))
+ cons( //
+ construct<OpenMPConstruct>(sourced(
+ construct<OpenMPSectionConstruct>(maybe(sectionDir), block))),
+ many(construct<OpenMPConstruct>(
+ sourced(construct<OpenMPSectionConstruct>(sectionDir, block))))),
+ Parser<OmpEndSectionsDirective>{} / endOmpLine)))
static bool IsExecutionPart(const OmpDirectiveName &name) {
return name.IsExecutionPart();
diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp
index fc15d46..ef209ff 100644
--- a/flang/lib/Parser/unparse.cpp
+++ b/flang/lib/Parser/unparse.cpp
@@ -2250,6 +2250,11 @@ public:
Walk(std::get<OmpObjectList>(x.t));
Walk(": ", std::get<std::optional<std::list<Modifier>>>(x.t));
}
+ void Unparse(const OmpEnterClause &x) {
+ using Modifier = OmpEnterClause::Modifier;
+ Walk(std::get<std::optional<std::list<Modifier>>>(x.t), ": ");
+ Walk(std::get<OmpObjectList>(x.t));
+ }
void Unparse(const OmpFromClause &x) {
using Modifier = OmpFromClause::Modifier;
Walk(std::get<std::optional<std::list<Modifier>>>(x.t), ": ");
@@ -2801,16 +2806,16 @@ public:
break;
}
}
- void Unparse(const OmpSectionBlocks &x) {
- for (const auto &y : x.v) {
+ void Unparse(const OpenMPSectionConstruct &x) {
+ if (auto &&dirSpec{
+ std::get<std::optional<OmpDirectiveSpecification>>(x.t)}) {
BeginOpenMP();
- Word("!$OMP SECTION");
+ Word("!$OMP ");
+ Walk(*dirSpec);
Put("\n");
EndOpenMP();
- // y.u is an OpenMPSectionConstruct
- // (y.u).v is Block
- Walk(std::get<OpenMPSectionConstruct>(y.u).v, "");
}
+ Walk(std::get<Block>(x.t), "");
}
void Unparse(const OpenMPSectionsConstruct &x) {
BeginOpenMP();
@@ -2818,7 +2823,7 @@ public:
Walk(std::get<OmpBeginSectionsDirective>(x.t));
Put("\n");
EndOpenMP();
- Walk(std::get<OmpSectionBlocks>(x.t));
+ Walk(std::get<std::list<OpenMPConstruct>>(x.t), "");
BeginOpenMP();
Word("!$OMP END ");
Walk(std::get<OmpEndSectionsDirective>(x.t));
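
Net effect on unparsing (illustrative output, matching the updated sections.f90 expectations below): an "!$OMP SECTION" line is re-emitted only when a directive specification was present in the source, so an implicit first section round-trips without one:

    !$OMP SECTIONS
     CALL f1()
    !$OMP END SECTIONS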
@@ -2986,6 +2991,7 @@ public:
WALK_NESTED_ENUM(UseStmt, ModuleNature) // R1410
WALK_NESTED_ENUM(OmpAdjustArgsClause::OmpAdjustOp, Value) // OMP adjustop
WALK_NESTED_ENUM(OmpAtClause, ActionTime) // OMP at
+ WALK_NESTED_ENUM(OmpAutomapModifier, Value) // OMP automap-modifier
WALK_NESTED_ENUM(OmpBindClause, Binding) // OMP bind
WALK_NESTED_ENUM(OmpProcBindClause, AffinityPolicy) // OMP proc_bind
WALK_NESTED_ENUM(OmpDefaultClause, DataSharingAttribute) // OMP default
diff --git a/flang/lib/Semantics/check-cuda.cpp b/flang/lib/Semantics/check-cuda.cpp
index b011476..9b48432 100644
--- a/flang/lib/Semantics/check-cuda.cpp
+++ b/flang/lib/Semantics/check-cuda.cpp
@@ -761,14 +761,13 @@ void CUDAChecker::Enter(const parser::AssignmentStmt &x) {
// legal.
if (nbLhs == 0 && nbRhs > 1) {
context_.Say(lhsLoc,
- "More than one reference to a CUDA object on the right hand side of the assigment"_err_en_US);
+ "More than one reference to a CUDA object on the right hand side of the assignment"_err_en_US);
}
- if (Fortran::evaluate::HasCUDADeviceAttrs(assign->lhs) &&
- Fortran::evaluate::HasCUDAImplicitTransfer(assign->rhs)) {
+ if (evaluate::HasCUDADeviceAttrs(assign->lhs) &&
+ evaluate::HasCUDAImplicitTransfer(assign->rhs)) {
if (GetNbOfCUDAManagedOrUnifiedSymbols(assign->lhs) == 1 &&
- GetNbOfCUDAManagedOrUnifiedSymbols(assign->rhs) == 1 &&
- GetNbOfCUDADeviceSymbols(assign->rhs) == 1) {
+ GetNbOfCUDAManagedOrUnifiedSymbols(assign->rhs) == 1 && nbRhs == 1) {
return; // This is a special case handled on the host.
}
context_.Say(lhsLoc, "Unsupported CUDA data transfer"_err_en_US);
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index a2f2906..d769f22 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -2081,7 +2081,7 @@ static bool ConflictsWithIntrinsicAssignment(const Procedure &proc) {
}
static bool ConflictsWithIntrinsicOperator(
- const GenericKind &kind, const Procedure &proc) {
+ const GenericKind &kind, const Procedure &proc, SemanticsContext &context) {
if (!kind.IsIntrinsicOperator()) {
return false;
}
@@ -2167,7 +2167,7 @@ bool CheckHelper::CheckDefinedOperator(SourceName opName, GenericKind kind,
}
} else if (!checkDefinedOperatorArgs(opName, specific, proc)) {
return false; // error was reported
- } else if (ConflictsWithIntrinsicOperator(kind, proc)) {
+ } else if (ConflictsWithIntrinsicOperator(kind, proc, context_)) {
msg = "%s function '%s' conflicts with intrinsic operator"_err_en_US;
}
if (msg) {
diff --git a/flang/lib/Semantics/check-omp-structure.cpp b/flang/lib/Semantics/check-omp-structure.cpp
index d214d22..20a86e9 100644
--- a/flang/lib/Semantics/check-omp-structure.cpp
+++ b/flang/lib/Semantics/check-omp-structure.cpp
@@ -1057,10 +1057,11 @@ void OmpStructureChecker::Enter(const parser::OpenMPSectionsConstruct &x) {
PushContextAndClauseSets(beginDir.source, beginDir.v);
AddEndDirectiveClauses(std::get<parser::OmpClauseList>(endSectionsDir.t));
- const auto &sectionBlocks{std::get<parser::OmpSectionBlocks>(x.t)};
- for (const parser::OpenMPConstruct &block : sectionBlocks.v) {
- CheckNoBranching(std::get<parser::OpenMPSectionConstruct>(block.u).v,
- beginDir.v, beginDir.source);
+ const auto &sectionBlocks{std::get<std::list<parser::OpenMPConstruct>>(x.t)};
+ for (const parser::OpenMPConstruct &construct : sectionBlocks) {
+ auto &section{std::get<parser::OpenMPSectionConstruct>(construct.u)};
+ CheckNoBranching(
+ std::get<parser::Block>(section.t), beginDir.v, beginDir.source);
}
HasInvalidWorksharingNesting(
beginDir.source, llvm::omp::nestedWorkshareErrSet);
@@ -1568,9 +1569,10 @@ void OmpStructureChecker::Leave(const parser::OpenMPDeclareTargetConstruct &x) {
},
[&](const parser::OmpClause::Enter &enterClause) {
enterClauseFound = true;
- CheckSymbolNames(dir.source, enterClause.v);
- CheckVarIsNotPartOfAnotherVar(dir.source, enterClause.v);
- CheckThreadprivateOrDeclareTargetVar(enterClause.v);
+ auto &objList{std::get<parser::OmpObjectList>(enterClause.v.t)};
+ CheckSymbolNames(dir.source, objList);
+ CheckVarIsNotPartOfAnotherVar(dir.source, objList);
+ CheckThreadprivateOrDeclareTargetVar(objList);
},
[&](const parser::OmpClause::DeviceType &deviceTypeClause) {
deviceTypeClauseFound = true;
@@ -4028,7 +4030,11 @@ void OmpStructureChecker::Enter(const parser::OmpClause::HasDeviceAddr &x) {
void OmpStructureChecker::Enter(const parser::OmpClause::Enter &x) {
CheckAllowedClause(llvm::omp::Clause::OMPC_enter);
- const parser::OmpObjectList &objList{x.v};
+ if (!OmpVerifyModifiers(
+ x.v, llvm::omp::OMPC_enter, GetContext().clauseSource, context_)) {
+ return;
+ }
+ const parser::OmpObjectList &objList{std::get<parser::OmpObjectList>(x.v.t)};
SymbolSourceMap symbols;
GetSymbolsInObjectList(objList, symbols);
for (const auto &[symbol, source] : symbols) {
@@ -4488,17 +4494,18 @@ const parser::OmpObjectList *OmpStructureChecker::GetOmpObjectList(
const parser::OmpClause &clause) {
// Clauses with OmpObjectList as its data member
- using MemberObjectListClauses = std::tuple<parser::OmpClause::Copyprivate,
- parser::OmpClause::Copyin, parser::OmpClause::Enter,
- parser::OmpClause::Firstprivate, parser::OmpClause::Link,
- parser::OmpClause::Private, parser::OmpClause::Shared,
- parser::OmpClause::UseDevicePtr, parser::OmpClause::UseDeviceAddr>;
+ using MemberObjectListClauses =
+ std::tuple<parser::OmpClause::Copyprivate, parser::OmpClause::Copyin,
+ parser::OmpClause::Firstprivate, parser::OmpClause::Link,
+ parser::OmpClause::Private, parser::OmpClause::Shared,
+ parser::OmpClause::UseDevicePtr, parser::OmpClause::UseDeviceAddr>;
// Clauses with OmpObjectList in the tuple
- using TupleObjectListClauses = std::tuple<parser::OmpClause::Aligned,
- parser::OmpClause::Allocate, parser::OmpClause::From,
- parser::OmpClause::Lastprivate, parser::OmpClause::Map,
- parser::OmpClause::Reduction, parser::OmpClause::To>;
+ using TupleObjectListClauses =
+ std::tuple<parser::OmpClause::Aligned, parser::OmpClause::Allocate,
+ parser::OmpClause::From, parser::OmpClause::Lastprivate,
+ parser::OmpClause::Map, parser::OmpClause::Reduction,
+ parser::OmpClause::To, parser::OmpClause::Enter>;
// TODO:: Generate the tuples using TableGen.
// Handle other constructs with OmpObjectList such as OpenMPThreadprivate.
diff --git a/flang/lib/Semantics/expression.cpp b/flang/lib/Semantics/expression.cpp
index 1447372..92dbe0e 100644
--- a/flang/lib/Semantics/expression.cpp
+++ b/flang/lib/Semantics/expression.cpp
@@ -165,10 +165,17 @@ public:
bool CheckForNullPointer(const char *where = "as an operand here");
bool CheckForAssumedRank(const char *where = "as an operand here");
+ bool AnyCUDADeviceData() const;
+ // Returns true if an interface has been defined for an intrinsic operator
+ // with one or more device operands.
+ bool HasDeviceDefinedIntrinsicOpOverride(const char *) const;
+ template <typename E> bool HasDeviceDefinedIntrinsicOpOverride(E opr) const {
+ return HasDeviceDefinedIntrinsicOpOverride(
+ context_.context().languageFeatures().GetNames(opr));
+ }
+
// Find and return a user-defined operator or report an error.
// The provided message is used if there is no such operator.
- // If a definedOpSymbolPtr is provided, the caller must check
- // for its accessibility.
MaybeExpr TryDefinedOp(
const char *, parser::MessageFixedText, bool isUserOp = false);
template <typename E>
@@ -183,6 +190,8 @@ public:
void Dump(llvm::raw_ostream &);
private:
+ bool HasDeviceDefinedIntrinsicOpOverride(
+ const std::vector<const char *> &) const;
MaybeExpr TryDefinedOp(
const std::vector<const char *> &, parser::MessageFixedText);
MaybeExpr TryBoundOp(const Symbol &, int passIndex);
@@ -202,7 +211,7 @@ private:
void SayNoMatch(
const std::string &, bool isAssignment = false, bool isAmbiguous = false);
std::string TypeAsFortran(std::size_t);
- bool AnyUntypedOrMissingOperand();
+ bool AnyUntypedOrMissingOperand() const;
ExpressionAnalyzer &context_;
ActualArguments actuals_;
@@ -4497,13 +4506,20 @@ void ArgumentAnalyzer::Analyze(
bool ArgumentAnalyzer::IsIntrinsicRelational(RelationalOperator opr,
const DynamicType &leftType, const DynamicType &rightType) const {
CHECK(actuals_.size() == 2);
- return semantics::IsIntrinsicRelational(
- opr, leftType, GetRank(0), rightType, GetRank(1));
+ return !(context_.context().languageFeatures().IsEnabled(
+ common::LanguageFeature::CUDA) &&
+ HasDeviceDefinedIntrinsicOpOverride(opr)) &&
+ semantics::IsIntrinsicRelational(
+ opr, leftType, GetRank(0), rightType, GetRank(1));
}
bool ArgumentAnalyzer::IsIntrinsicNumeric(NumericOperator opr) const {
std::optional<DynamicType> leftType{GetType(0)};
- if (actuals_.size() == 1) {
+ if (context_.context().languageFeatures().IsEnabled(
+ common::LanguageFeature::CUDA) &&
+ HasDeviceDefinedIntrinsicOpOverride(AsFortran(opr))) {
+ return false;
+ } else if (actuals_.size() == 1) {
if (IsBOZLiteral(0)) {
return opr == NumericOperator::Add; // unary '+'
} else {
@@ -4617,6 +4633,53 @@ bool ArgumentAnalyzer::CheckForAssumedRank(const char *where) {
return true;
}
+bool ArgumentAnalyzer::AnyCUDADeviceData() const {
+ for (const std::optional<ActualArgument> &arg : actuals_) {
+ if (arg) {
+ if (const Expr<SomeType> *expr{arg->UnwrapExpr()}) {
+ if (HasCUDADeviceAttrs(*expr)) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+// Some operations can be defined with explicit non-type-bound interfaces
+// whose argument types and ranks would erroneously conflict with intrinsic
+// operations, except that one or more of their dummy arguments have the
+// DEVICE attribute.
+bool ArgumentAnalyzer::HasDeviceDefinedIntrinsicOpOverride(
+ const char *opr) const {
+ if (AnyCUDADeviceData() && !AnyUntypedOrMissingOperand()) {
+ std::string oprNameString{"operator("s + opr + ')'};
+ parser::CharBlock oprName{oprNameString};
+ parser::Messages buffer;
+ auto restorer{context_.GetContextualMessages().SetMessages(buffer)};
+ const auto &scope{context_.context().FindScope(source_)};
+ if (Symbol * generic{scope.FindSymbol(oprName)}) {
+ parser::Name name{generic->name(), generic};
+ const Symbol *resultSymbol{nullptr};
+ if (context_.AnalyzeDefinedOp(
+ name, ActualArguments{actuals_}, resultSymbol)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool ArgumentAnalyzer::HasDeviceDefinedIntrinsicOpOverride(
+ const std::vector<const char *> &oprNames) const {
+ for (const char *opr : oprNames) {
+ if (HasDeviceDefinedIntrinsicOpOverride(opr)) {
+ return true;
+ }
+ }
+ return false;
+}
+
MaybeExpr ArgumentAnalyzer::TryDefinedOp(
const char *opr, parser::MessageFixedText error, bool isUserOp) {
if (AnyUntypedOrMissingOperand()) {
@@ -5135,7 +5198,7 @@ std::string ArgumentAnalyzer::TypeAsFortran(std::size_t i) {
}
}
-bool ArgumentAnalyzer::AnyUntypedOrMissingOperand() {
+bool ArgumentAnalyzer::AnyUntypedOrMissingOperand() const {
for (const auto &actual : actuals_) {
if (!actual ||
(!actual->GetType() && !IsBareNullPointer(actual->UnwrapExpr()))) {
diff --git a/flang/lib/Semantics/openmp-modifiers.cpp b/flang/lib/Semantics/openmp-modifiers.cpp
index 336ce4b..af4000c 100644
--- a/flang/lib/Semantics/openmp-modifiers.cpp
+++ b/flang/lib/Semantics/openmp-modifiers.cpp
@@ -157,6 +157,22 @@ const OmpModifierDescriptor &OmpGetDescriptor<parser::OmpAlwaysModifier>() {
}
template <>
+const OmpModifierDescriptor &OmpGetDescriptor<parser::OmpAutomapModifier>() {
+ static const OmpModifierDescriptor desc{
+ /*name=*/"automap-modifier",
+ /*props=*/
+ {
+ {60, {OmpProperty::Unique}},
+ },
+ /*clauses=*/
+ {
+ {60, {Clause::OMPC_enter}},
+ },
+ };
+ return desc;
+}
+
+template <>
const OmpModifierDescriptor &OmpGetDescriptor<parser::OmpChunkModifier>() {
static const OmpModifierDescriptor desc{
/*name=*/"chunk-modifier",
diff --git a/flang/lib/Semantics/pointer-assignment.cpp b/flang/lib/Semantics/pointer-assignment.cpp
index 0908769..e767bf8 100644
--- a/flang/lib/Semantics/pointer-assignment.cpp
+++ b/flang/lib/Semantics/pointer-assignment.cpp
@@ -270,18 +270,18 @@ bool PointerAssignmentChecker::Check(const evaluate::FunctionRef<T> &f) {
std::optional<MessageFixedText> msg;
const auto &funcResult{proc->functionResult}; // C1025
if (!funcResult) {
- msg = "%s is associated with the non-existent result of reference to"
- " procedure"_err_en_US;
+ msg =
+ "%s is associated with the non-existent result of reference to procedure"_err_en_US;
} else if (CharacterizeProcedure()) {
// Shouldn't be here in this function unless lhs is an object pointer.
- msg = "Procedure %s is associated with the result of a reference to"
- " function '%s' that does not return a procedure pointer"_err_en_US;
+ msg =
+ "Procedure %s is associated with the result of a reference to function '%s' that does not return a procedure pointer"_err_en_US;
} else if (funcResult->IsProcedurePointer()) {
- msg = "Object %s is associated with the result of a reference to"
- " function '%s' that is a procedure pointer"_err_en_US;
+ msg =
+ "Object %s is associated with the result of a reference to function '%s' that is a procedure pointer"_err_en_US;
} else if (!funcResult->attrs.test(FunctionResult::Attr::Pointer)) {
- msg = "%s is associated with the result of a reference to function '%s'"
- " that is a not a pointer"_err_en_US;
+ msg =
+ "%s is associated with the result of a reference to function '%s' that is not a pointer"_err_en_US;
} else if (isContiguous_ &&
!funcResult->attrs.test(FunctionResult::Attr::Contiguous)) {
auto restorer{common::ScopedSet(lhs_, symbol)};
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index 4c3e509..cb68369 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -1545,6 +1545,7 @@ void AccAttributeVisitor::Post(const parser::AccDefaultClause &x) {
void AccAttributeVisitor::Post(const parser::Name &name) {
auto *symbol{name.symbol};
if (symbol && !dirContext_.empty() && GetContext().withinConstruct) {
+ symbol = &symbol->GetUltimate();
if (!symbol->owner().IsDerivedType() && !symbol->has<ProcEntityDetails>() &&
!symbol->has<SubprogramDetails>() && !IsObjectWithDSA(*symbol)) {
if (Symbol * found{currScope().FindSymbol(name.source)}) {
@@ -1553,8 +1554,7 @@ void AccAttributeVisitor::Post(const parser::Name &name) {
} else if (GetContext().defaultDSA == Symbol::Flag::AccNone) {
// 2.5.14.
context_.Say(name.source,
- "The DEFAULT(NONE) clause requires that '%s' must be listed in "
- "a data-mapping clause"_err_en_US,
+ "The DEFAULT(NONE) clause requires that '%s' must be listed in a data-mapping clause"_err_en_US,
symbol->name());
}
}
@@ -2152,7 +2152,8 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPDeclareTargetConstruct &x) {
ResolveOmpObjectList(linkClause->v, Symbol::Flag::OmpDeclareTarget);
} else if (const auto *enterClause{
std::get_if<parser::OmpClause::Enter>(&clause.u)}) {
- ResolveOmpObjectList(enterClause->v, Symbol::Flag::OmpDeclareTarget);
+ ResolveOmpObjectList(std::get<parser::OmpObjectList>(enterClause->v.t),
+ Symbol::Flag::OmpDeclareTarget);
}
}
}
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index d08c669..2611470 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -1646,7 +1646,8 @@ public:
populateDeclareTargetNames(linkClause->v);
} else if (const auto *enterClause{
std::get_if<parser::OmpClause::Enter>(&clause.u)}) {
- populateDeclareTargetNames(enterClause->v);
+ populateDeclareTargetNames(
+ std::get<parser::OmpObjectList>(enterClause->v.t));
}
}
}
diff --git a/flang/test/Examples/omp-atomic.f90 b/flang/test/Examples/omp-atomic.f90
index 934f84f..5695b62 100644
--- a/flang/test/Examples/omp-atomic.f90
+++ b/flang/test/Examples/omp-atomic.f90
@@ -31,13 +31,13 @@ end
! CHECK-NEXT: - clause: read
! CHECK-NEXT: details: ''
! CHECK-NEXT: - clause: seq_cst
-! CHECK-NEXT: details: 'name_modifier=atomic;'
+! CHECK-NEXT: details: ''
! CHECK-NEXT:- file: '{{[^"]*}}omp-atomic.f90'
! CHECK-NEXT: line: 12
! CHECK-NEXT: construct: atomic
! CHECK-NEXT: clauses:
! CHECK-NEXT: - clause: seq_cst
-! CHECK-NEXT: details: 'name_modifier=atomic;'
+! CHECK-NEXT: details: ''
! CHECK-NEXT: - clause: write
! CHECK-NEXT: details: ''
! CHECK-NEXT:- file: '{{[^"]*}}omp-atomic.f90'
@@ -45,7 +45,7 @@ end
! CHECK-NEXT: construct: atomic
! CHECK-NEXT: clauses:
! CHECK-NEXT: - clause: capture
-! CHECK-NEXT: details: 'name_modifier=atomic;name_modifier=atomic;'
+! CHECK-NEXT: details: ''
! CHECK-NEXT: - clause: seq_cst
! CHECK-NEXT: details: ''
! CHECK-NEXT:- file: '{{[^"]*}}omp-atomic.f90'
diff --git a/flang/test/Examples/omp-sections.f90 b/flang/test/Examples/omp-sections.f90
index 41e6e8f..a6d2806 100644
--- a/flang/test/Examples/omp-sections.f90
+++ b/flang/test/Examples/omp-sections.f90
@@ -13,11 +13,11 @@ subroutine omp_sections()
end subroutine omp_sections
!CHECK: - file: {{.*}}
-!CHECK: line: 9
+!CHECK: line: 8
!CHECK: construct: section
!CHECK: clauses: []
!CHECK: - file: {{.*}}
-!CHECK: line: 11
+!CHECK: line: 10
!CHECK: construct: section
!CHECK: clauses: []
!CHECK: - file: {{.*}}
diff --git a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90 b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90
index df85942..916bd66 100644
--- a/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90
+++ b/flang/test/Parser/OpenMP/declare-target-indirect-tree.f90
@@ -15,7 +15,8 @@ module functions
contains
function func1() result(i)
!$omp declare target enter(func1) indirect(.true.)
- !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func1'
+ !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpEnterClause
+ !CHECK-NEXT: | | | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func1'
!CHECK-NEXT: | | | | | OmpClause -> Indirect -> OmpIndirectClause -> Scalar -> Logical -> Expr = '.true._4'
!CHECK-NEXT: | | | | | | LiteralConstant -> LogicalLiteralConstant
!CHECK-NEXT: | | | | | | | bool = 'true'
@@ -26,7 +27,8 @@ contains
function func2() result(i)
!$omp declare target enter(func2) indirect
- !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func2'
+ !CHECK: | | | | | OmpDeclareTargetSpecifier -> OmpDeclareTargetWithClause -> OmpClauseList -> OmpClause -> Enter -> OmpEnterClause
+ !CHECK-NEXT: | | | | | | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'func2'
!CHECK-NEXT: | | | | | OmpClause -> Indirect -> OmpIndirectClause ->
character(1) :: i
i = 'b'
diff --git a/flang/test/Parser/OpenMP/enter-automap-modifier.f90 b/flang/test/Parser/OpenMP/enter-automap-modifier.f90
new file mode 100644
index 0000000..1f361ca5
--- /dev/null
+++ b/flang/test/Parser/OpenMP/enter-automap-modifier.f90
@@ -0,0 +1,16 @@
+!RUN: %flang_fc1 -fdebug-unparse -fopenmp -fopenmp-version=60 %s | FileCheck %s --check-prefix=UNPARSE
+!RUN: %flang_fc1 -fdebug-dump-parse-tree -fopenmp -fopenmp-version=60 %s | FileCheck %s --check-prefix=PARSE-TREE
+
+program automap
+ integer :: x
+ !$omp declare target enter(automap: x)
+end program
+
+!UNPARSE: PROGRAM AUTOMAP
+!UNPARSE: INTEGER x
+!UNPARSE: !$OMP DECLARE TARGET ENTER(AUTOMAP: x)
+!UNPARSE: END PROGRAM
+
+!PARSE-TREE: OmpClauseList -> OmpClause -> Enter -> OmpEnterClause
+!PARSE-TREE-NEXT: | Modifier -> OmpAutomapModifier -> Value = Automap
+!PARSE-TREE-NEXT: | OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x'
diff --git a/flang/test/Parser/OpenMP/sections.f90 b/flang/test/Parser/OpenMP/sections.f90
index 3752cef..8ba2294 100644
--- a/flang/test/Parser/OpenMP/sections.f90
+++ b/flang/test/Parser/OpenMP/sections.f90
@@ -10,32 +10,41 @@ subroutine openmp_sections(x, y)
!==============================================================================
!CHECK: !$omp sections
!$omp sections
- !CHECK: !$omp section
!CHECK: !$omp end sections
!$omp end sections
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionsConstruct
-!PARSE-TREE: OmpBeginSectionsDirective
-!PARSE-TREE-NOT: ExecutionPartConstruct
-!PARSE-TREE: OmpEndSectionsDirective
+!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPSectionsConstruct
+!PARSE-TREE: | OmpBeginSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | Block
+!PARSE-TREE: | OmpEndSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
!==============================================================================
! single section, without `!$omp section`
!==============================================================================
!CHECK: !$omp sections
!$omp sections
- !CHECK: !$omp section
!CHECK: CALL
call F1()
!CHECK: !$omp end sections
!$omp end sections
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionsConstruct
-!PARSE-TREE: OmpBeginSectionsDirective
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE-NOT: ExecutionPartConstruct
-!PARSE-TREE: OmpEndSectionsDirective
+!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPSectionsConstruct
+!PARSE-TREE: | OmpBeginSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f1'
+!PARSE-TREE: | OmpEndSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
!==============================================================================
! single section with `!$omp section`
@@ -49,12 +58,22 @@ subroutine openmp_sections(x, y)
!CHECK: !$omp end sections
!$omp end sections
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionsConstruct
-!PARSE-TREE: OmpBeginSectionsDirective
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE-NOT: ExecutionPartConstruct
-!PARSE-TREE: OmpEndSectionsDirective
+!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPSectionsConstruct
+!PARSE-TREE: | OmpBeginSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | OmpDirectiveSpecification
+!PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section
+!PARSE-TREE: | | | OmpClauseList ->
+!PARSE-TREE: | | | Flags = None
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f1'
+!PARSE-TREE: | OmpEndSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
!==============================================================================
! multiple sections
@@ -76,16 +95,40 @@ subroutine openmp_sections(x, y)
!CHECK: !$omp end sections
!$omp end sections
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionsConstruct
-!PARSE-TREE: OmpBeginSectionsDirective
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE-NOT: ExecutionPartConstruct
-!PARSE-TREE: OmpEndSectionsDirective
+!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPSectionsConstruct
+!PARSE-TREE: | OmpBeginSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | OmpDirectiveSpecification
+!PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section
+!PARSE-TREE: | | | OmpClauseList ->
+!PARSE-TREE: | | | Flags = None
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f1'
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | OmpDirectiveSpecification
+!PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section
+!PARSE-TREE: | | | OmpClauseList ->
+!PARSE-TREE: | | | Flags = None
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f2()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f2'
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | OmpDirectiveSpecification
+!PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section
+!PARSE-TREE: | | | OmpClauseList ->
+!PARSE-TREE: | | | Flags = None
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f3()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f3'
+!PARSE-TREE: | OmpEndSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList ->
!==============================================================================
! multiple sections with clauses
@@ -107,15 +150,40 @@ subroutine openmp_sections(x, y)
!CHECK: !$omp end sections NOWAIT
!$omp end sections NOWAIT
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionsConstruct
-!PARSE-TREE: OmpBeginSectionsDirective
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE: OpenMPConstruct -> OpenMPSectionConstruct -> Block
-!PARSE-TREE: CallStmt
-!PARSE-TREE-NOT: ExecutionPartConstruct
-!PARSE-TREE: OmpEndSectionsDirective
+!PARSE-TREE: ExecutionPartConstruct -> ExecutableConstruct -> OpenMPConstruct -> OpenMPSectionsConstruct
+!PARSE-TREE: | OmpBeginSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList -> OmpClause -> Private -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'x'
+!PARSE-TREE: | | OmpClause -> Firstprivate -> OmpObjectList -> OmpObject -> Designator -> DataRef -> Name = 'y'
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | OmpDirectiveSpecification
+!PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section
+!PARSE-TREE: | | | OmpClauseList ->
+!PARSE-TREE: | | | Flags = None
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f1()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f1'
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | OmpDirectiveSpecification
+!PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section
+!PARSE-TREE: | | | OmpClauseList ->
+!PARSE-TREE: | | | Flags = None
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f2()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f2'
+!PARSE-TREE: | OpenMPConstruct -> OpenMPSectionConstruct
+!PARSE-TREE: | | OmpDirectiveSpecification
+!PARSE-TREE: | | | OmpDirectiveName -> llvm::omp::Directive = section
+!PARSE-TREE: | | | OmpClauseList ->
+!PARSE-TREE: | | | Flags = None
+!PARSE-TREE: | | Block
+!PARSE-TREE: | | | ExecutionPartConstruct -> ExecutableConstruct -> ActionStmt -> CallStmt = 'CALL f3()'
+!PARSE-TREE: | | | | Call
+!PARSE-TREE: | | | | | ProcedureDesignator -> Name = 'f3'
+!PARSE-TREE: | OmpEndSectionsDirective
+!PARSE-TREE: | | OmpSectionsDirective -> llvm::omp::Directive = sections
+!PARSE-TREE: | | OmpClauseList -> OmpClause -> Nowait
END subroutine openmp_sections
diff --git a/flang/test/Semantics/OpenACC/acc-default-none-function.f90 b/flang/test/Semantics/OpenACC/acc-default-none-function.f90
new file mode 100644
index 0000000..f0a697f
--- /dev/null
+++ b/flang/test/Semantics/OpenACC/acc-default-none-function.f90
@@ -0,0 +1,20 @@
+! RUN: %python %S/../test_errors.py %s %flang -fopenacc -pedantic
+
+module mm_acc_rout_function
+contains
+ integer function dosomething(res)
+ !$acc routine seq
+ integer :: res
+ dosomething = res + 1
+ end function
+end module
+
+program main
+ use mm_acc_rout_function
+ implicit none
+ integer :: res = 1
+ !$acc serial default(none) copy(res)
+ res = dosomething(res)
+ !$acc end serial
+end program
+
diff --git a/flang/test/Semantics/assign02.f90 b/flang/test/Semantics/assign02.f90
index f998197..c447078 100644
--- a/flang/test/Semantics/assign02.f90
+++ b/flang/test/Semantics/assign02.f90
@@ -139,7 +139,7 @@ contains
real, target :: x
real, pointer :: p
p => f1()
- !ERROR: pointer 'p' is associated with the result of a reference to function 'f2' that is a not a pointer
+ !ERROR: pointer 'p' is associated with the result of a reference to function 'f2' that is not a pointer
p => f2()
contains
function f1()
diff --git a/flang/test/Semantics/bug1214.cuf b/flang/test/Semantics/bug1214.cuf
new file mode 100644
index 0000000..114fad1
--- /dev/null
+++ b/flang/test/Semantics/bug1214.cuf
@@ -0,0 +1,49 @@
+! RUN: %flang_fc1 -fdebug-unparse %s 2>&1 | FileCheck %s
+module overrides
+ type realResult
+ real a
+ end type
+ interface operator(*)
+ procedure :: multHostDevice, multDeviceHost
+ end interface
+ interface assignment(=)
+ procedure :: assignHostResult, assignDeviceResult
+ end interface
+ contains
+ elemental function multHostDevice(x, y) result(result)
+ real, intent(in) :: x
+ real, intent(in), device :: y
+ type(realResult) result
+ result%a = x * y
+ end
+ elemental function multDeviceHost(x, y) result(result)
+ real, intent(in), device :: x
+ real, intent(in) :: y
+ type(realResult) result
+ result%a = x * y
+ end
+ elemental subroutine assignHostResult(lhs, rhs)
+ real, intent(out) :: lhs
+ type(realResult), intent(in) :: rhs
+ lhs = rhs%a
+ end
+ elemental subroutine assignDeviceResult(lhs, rhs)
+ real, intent(out), device :: lhs
+ type(realResult), intent(in) :: rhs
+ lhs = rhs%a
+ end
+end
+
+program p
+ use overrides
+ real, device :: da, db
+ real :: ha, hb
+!CHECK: CALL assigndeviceresult(db,multhostdevice(2._4,da))
+ db = 2. * da
+!CHECK: CALL assigndeviceresult(db,multdevicehost(da,2._4))
+ db = da * 2.
+!CHECK: CALL assignhostresult(ha,multhostdevice(2._4,da))
+ ha = 2. * da
+!CHECK: CALL assignhostresult(ha,multdevicehost(da,2._4))
+ ha = da * 2.
+end
diff --git a/flang/test/Semantics/cuf11.cuf b/flang/test/Semantics/cuf11.cuf
index 554ac25..1f5beb0 100644
--- a/flang/test/Semantics/cuf11.cuf
+++ b/flang/test/Semantics/cuf11.cuf
@@ -16,7 +16,7 @@ subroutine sub1()
real, device :: adev(10), bdev(10)
real :: ahost(10)
-!ERROR: More than one reference to a CUDA object on the right hand side of the assigment
+!ERROR: More than one reference to a CUDA object on the right hand side of the assignment
ahost = adev + bdev
ahost = adev + adev
diff --git a/flang/tools/flang-driver/driver.cpp b/flang/tools/flang-driver/driver.cpp
index 3a2dffc..8321b16 100644
--- a/flang/tools/flang-driver/driver.cpp
+++ b/flang/tools/flang-driver/driver.cpp
@@ -123,15 +123,14 @@ int main(int argc, const char **argv) {
// Create DiagnosticsEngine for the compiler driver
std::unique_ptr<clang::DiagnosticOptions> diagOpts =
createAndPopulateDiagOpts(args);
- llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs> diagID(
- new clang::DiagnosticIDs());
Fortran::frontend::TextDiagnosticPrinter *diagClient =
new Fortran::frontend::TextDiagnosticPrinter(llvm::errs(), *diagOpts);
diagClient->setPrefix(
std::string(llvm::sys::path::stem(getExecutablePath(args[0]))));
- clang::DiagnosticsEngine diags(diagID, *diagOpts, diagClient);
+ clang::DiagnosticsEngine diags(clang::DiagnosticIDs::create(), *diagOpts,
+ diagClient);
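+  // DiagnosticIDs::create() returns the ref-counted instance directly, so a
+  // separately constructed diagID variable is no longer needed.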
// Prepare the driver
clang::driver::Driver theDriver(driverPath,
diff --git a/flang/tools/flang-driver/fc1_main.cpp b/flang/tools/flang-driver/fc1_main.cpp
index f2cd513..d9b103d 100644
--- a/flang/tools/flang-driver/fc1_main.cpp
+++ b/flang/tools/flang-driver/fc1_main.cpp
@@ -65,10 +65,9 @@ int fc1_main(llvm::ArrayRef<const char *> argv, const char *argv0) {
// Create CompilerInvocation - use a dedicated instance of DiagnosticsEngine
// for parsing the arguments
- llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs> diagID(
- new clang::DiagnosticIDs());
clang::DiagnosticOptions diagOpts;
- clang::DiagnosticsEngine diags(diagID, diagOpts, diagsBuffer);
+ clang::DiagnosticsEngine diags(clang::DiagnosticIDs::create(), diagOpts,
+ diagsBuffer);
bool success = CompilerInvocation::createFromArgs(flang->getInvocation(),
argv, diags, argv0);
diff --git a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
index 2478fde..d85c393 100644
--- a/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
+++ b/libc/cmake/modules/LLVMLibCCompileOptionRules.cmake
@@ -108,6 +108,10 @@ function(_get_compile_options_from_config output_var)
list(APPEND config_options "-DLIBC_ERRNO_MODE=${LIBC_CONF_ERRNO_MODE}")
endif()
+ if(LIBC_CONF_THREAD_MODE)
+ list(APPEND config_options "-DLIBC_THREAD_MODE=${LIBC_CONF_THREAD_MODE}")
+ endif()
+
set(${output_var} ${config_options} PARENT_SCOPE)
endfunction(_get_compile_options_from_config)
diff --git a/libc/config/baremetal/aarch64/entrypoints.txt b/libc/config/baremetal/aarch64/entrypoints.txt
index e766eb2..e24e2b9 100644
--- a/libc/config/baremetal/aarch64/entrypoints.txt
+++ b/libc/config/baremetal/aarch64/entrypoints.txt
@@ -218,6 +218,7 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.abort
libc.src.stdlib.abs
libc.src.stdlib.aligned_alloc
+ libc.src.stdlib.atexit
libc.src.stdlib.atof
libc.src.stdlib.atoi
libc.src.stdlib.atol
diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt
index 336b1e6..44e9c3e 100644
--- a/libc/config/baremetal/arm/entrypoints.txt
+++ b/libc/config/baremetal/arm/entrypoints.txt
@@ -218,6 +218,7 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.abort
libc.src.stdlib.abs
libc.src.stdlib.aligned_alloc
+ libc.src.stdlib.atexit
libc.src.stdlib.atof
libc.src.stdlib.atoi
libc.src.stdlib.atol
diff --git a/libc/config/baremetal/config.json b/libc/config/baremetal/config.json
index 105e417..f01e508 100644
--- a/libc/config/baremetal/config.json
+++ b/libc/config/baremetal/config.json
@@ -4,6 +4,11 @@
"value": "LIBC_ERRNO_MODE_EXTERNAL"
}
},
+ "threads": {
+ "LIBC_CONF_THREAD_MODE": {
+ "value": "LIBC_THREAD_MODE_SINGLE"
+ }
+ },
"printf": {
"LIBC_CONF_PRINTF_DISABLE_FIXED_POINT": {
"value": true
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index e92ec87..29cf322a 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -218,6 +218,7 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.abort
libc.src.stdlib.abs
libc.src.stdlib.aligned_alloc
+ libc.src.stdlib.atexit
libc.src.stdlib.atof
libc.src.stdlib.atoi
libc.src.stdlib.atol
diff --git a/libc/config/config.json b/libc/config/config.json
index d53b293..1b05469 100644
--- a/libc/config/config.json
+++ b/libc/config/config.json
@@ -5,6 +5,12 @@
"doc": "The implementation used for errno, acceptable values are LIBC_ERRNO_MODE_DEFAULT, LIBC_ERRNO_MODE_UNDEFINED, LIBC_ERRNO_MODE_THREAD_LOCAL, LIBC_ERRNO_MODE_SHARED, LIBC_ERRNO_MODE_EXTERNAL, LIBC_ERRNO_MODE_SYSTEM, and LIBC_ERRNO_MODE_SYSTEM_INLINE."
}
},
+ "threads": {
+ "LIBC_CONF_THREAD_MODE": {
+ "value": "LIBC_THREAD_MODE_PLATFORM",
+ "doc": "The implementation used for Mutex, acceptable values are LIBC_THREAD_MODE_PLATFORM, LIBC_THREAD_MODE_SINGLE, and LIBC_THREAD_MODE_EXTERNAL."
+ }
+ },
"printf": {
"LIBC_CONF_PRINTF_DISABLE_FLOAT": {
"value": false,
diff --git a/libc/config/gpu/amdgpu/config.json b/libc/config/gpu/amdgpu/config.json
index 30ae10e..fa179b8 100644
--- a/libc/config/gpu/amdgpu/config.json
+++ b/libc/config/gpu/amdgpu/config.json
@@ -4,6 +4,11 @@
"value": "LIBC_ERRNO_MODE_SHARED"
}
},
+ "threads": {
+ "LIBC_CONF_THREAD_MODE": {
+ "value": "LIBC_THREAD_MODE_SINGLE"
+ }
+ },
"printf": {
"LIBC_CONF_PRINTF_DISABLE_FLOAT": {
"value": true
diff --git a/libc/config/gpu/nvptx/config.json b/libc/config/gpu/nvptx/config.json
index 30ae10e..fa179b8 100644
--- a/libc/config/gpu/nvptx/config.json
+++ b/libc/config/gpu/nvptx/config.json
@@ -4,6 +4,11 @@
"value": "LIBC_ERRNO_MODE_SHARED"
}
},
+ "threads": {
+ "LIBC_CONF_THREAD_MODE": {
+ "value": "LIBC_THREAD_MODE_SINGLE"
+ }
+ },
"printf": {
"LIBC_CONF_PRINTF_DISABLE_FLOAT": {
"value": true
diff --git a/libc/docs/configure.rst b/libc/docs/configure.rst
index 1094122..95c51b8 100644
--- a/libc/docs/configure.rst
+++ b/libc/docs/configure.rst
@@ -60,5 +60,7 @@ to learn about the defaults for your platform and target.
* **"string" options**
- ``LIBC_CONF_MEMSET_X86_USE_SOFTWARE_PREFETCHING``: Inserts prefetch for write instructions (PREFETCHW) for memset on x86 to recover performance when hardware prefetcher is disabled.
- ``LIBC_CONF_STRING_UNSAFE_WIDE_READ``: Read more than a byte at a time to perform byte-string operations like strlen.
+* **"threads" options**
+ - ``LIBC_CONF_THREAD_MODE``: The implementation used for Mutex, acceptable values are LIBC_THREAD_MODE_PLATFORM, LIBC_THREAD_MODE_SINGLE, and LIBC_THREAD_MODE_EXTERNAL.
* **"time" options**
- ``LIBC_CONF_TIME_64BIT``: Force the size of time_t to 64 bits, even on platforms where compatibility considerations would otherwise make it 32-bit.
diff --git a/libc/shared/math.h b/libc/shared/math.h
index 2153664..0605d91 100644
--- a/libc/shared/math.h
+++ b/libc/shared/math.h
@@ -23,7 +23,9 @@
#include "math/asinhf.h"
#include "math/asinhf16.h"
#include "math/atan.h"
+#include "math/atan2.h"
#include "math/atanf.h"
+#include "math/atanf16.h"
#include "math/erff.h"
#include "math/exp.h"
#include "math/exp10.h"
diff --git a/libc/shared/math/atan2.h b/libc/shared/math/atan2.h
new file mode 100644
index 0000000..8941108
--- /dev/null
+++ b/libc/shared/math/atan2.h
@@ -0,0 +1,23 @@
+//===-- Shared atan2 function -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SHARED_MATH_ATAN2_H
+#define LLVM_LIBC_SHARED_MATH_ATAN2_H
+
+#include "shared/libc_common.h"
+#include "src/__support/math/atan2.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace shared {
+
+using math::atan2;
+
+} // namespace shared
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SHARED_MATH_ATAN2_H
diff --git a/libc/shared/math/atanf16.h b/libc/shared/math/atanf16.h
new file mode 100644
index 0000000..f196907
--- /dev/null
+++ b/libc/shared/math/atanf16.h
@@ -0,0 +1,28 @@
+//===-- Shared atanf16 function ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SHARED_MATH_ATANF16_H
+#define LLVM_LIBC_SHARED_MATH_ATANF16_H
+
+#include "shared/libc_common.h"
+
+#ifdef LIBC_TYPES_HAS_FLOAT16
+
+#include "src/__support/math/atanf16.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace shared {
+
+using math::atanf16;
+
+} // namespace shared
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_TYPES_HAS_FLOAT16
+
+#endif // LLVM_LIBC_SHARED_MATH_ATANF16_H
diff --git a/libc/src/__support/GPU/allocator.cpp b/libc/src/__support/GPU/allocator.cpp
index 8fff4cc..250bebd 100644
--- a/libc/src/__support/GPU/allocator.cpp
+++ b/libc/src/__support/GPU/allocator.cpp
@@ -156,7 +156,7 @@ static inline constexpr uint32_t get_start_index(uint32_t chunk_size) {
// Returns the id of the lane below this one that acts as its leader.
static inline uint32_t get_leader_id(uint64_t ballot, uint32_t id) {
- uint64_t mask = id < BITS_IN_DWORD ? ~0ull << (id + 1) : 0;
+ uint64_t mask = id < BITS_IN_DWORD - 1 ? ~0ull << (id + 1) : 0;
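+  // Note: when id == BITS_IN_DWORD - 1, 'id + 1' equals the full bit width,
+  // so shifting would be undefined behavior; fall back to a zero mask.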
return BITS_IN_DWORD - cpp::countl_zero(ballot & ~mask) - 1;
}
@@ -266,38 +266,31 @@ struct Slab {
// Randomly walks the bitfield until it finds a free bit. Allocations attempt
// to put lanes right next to each other for better caching and convergence.
- void *allocate(uint64_t lane_mask, uint64_t uniform) {
+ void *allocate(uint64_t uniform, uint32_t reserved) {
uint32_t chunk_size = get_chunk_size();
uint32_t state = impl::entropy();
- // The uniform mask represents which lanes contain a uniform target pointer.
- // We attempt to place these next to each other.
- void *result = nullptr;
- uint32_t after = ~0u;
- uint32_t old_index = 0;
- for (uint64_t mask = lane_mask; mask;
- mask = gpu::ballot(lane_mask, !result)) {
- if (result)
- continue;
-
- // We try using any known empty bits from the previous attempt first.
- uint32_t start = gpu::shuffle(
- mask, cpp::countr_zero(uniform & mask),
- ~after ? (old_index & ~(BITS_IN_WORD - 1)) + cpp::countr_zero(~after)
- : __builtin_align_down(impl::xorshift32(state), BITS_IN_WORD));
+    // Try to find an empty bit in the bitfield to finish the allocation. We
+    // start at the number of allocations, since that position is guaranteed
+    // to be available until the user starts freeing memory.
+ uint64_t lane_mask = gpu::get_lane_mask();
+ uint32_t start = gpu::shuffle(
+ lane_mask, cpp::countr_zero(uniform & lane_mask), reserved);
+ for (;;) {
+ uint64_t lane_mask = gpu::get_lane_mask();
// Each lane tries to claim one bit in a single contiguous mask.
- uint32_t id = impl::lane_count(uniform & mask, gpu::get_lane_id());
+ uint32_t id = impl::lane_count(uniform & lane_mask, gpu::get_lane_id());
uint32_t index = (start + id) % usable_bits(chunk_size);
uint32_t slot = index / BITS_IN_WORD;
uint32_t bit = index % BITS_IN_WORD;
// Get the mask of bits destined for the same slot and coalesce it.
uint32_t leader = impl::get_leader_id(
- uniform & gpu::ballot(mask, !id || index % BITS_IN_WORD == 0),
+ uniform & gpu::ballot(lane_mask, !id || index % BITS_IN_WORD == 0),
gpu::get_lane_id());
- uint32_t length = cpp::popcount(uniform & mask) -
- impl::lane_count(uniform & mask, leader);
+ uint32_t length = cpp::popcount(uniform & lane_mask) -
+ impl::lane_count(uniform & lane_mask, leader);
uint32_t bitmask =
static_cast<uint32_t>(
(uint64_t(1) << cpp::min(length, BITS_IN_WORD)) - 1)
@@ -307,18 +300,23 @@ struct Slab {
if (gpu::get_lane_id() == leader)
before = cpp::AtomicRef(get_bitfield()[slot])
.fetch_or(bitmask, cpp::MemoryOrder::RELAXED);
- before = gpu::shuffle(mask, leader, before);
- if (~before & (1 << bit))
- result = ptr_from_index(index, chunk_size);
- else
- sleep_briefly();
+ before = gpu::shuffle(lane_mask, leader, before);
+ if (~before & (1 << bit)) {
+ cpp::atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
+ return ptr_from_index(index, chunk_size);
+ }
- after = before | bitmask;
- old_index = index;
+      // If the previous operation found an empty bit, we move there; otherwise
+      // we generate a new random index to start at.
+ uint32_t after = before | bitmask;
+ start = gpu::shuffle(
+ gpu::get_lane_mask(),
+ cpp::countr_zero(uniform & gpu::get_lane_mask()),
+ ~after ? __builtin_align_down(index, BITS_IN_WORD) +
+ cpp::countr_zero(~after)
+ : __builtin_align_down(impl::xorshift32(state), BITS_IN_WORD));
+ sleep_briefly();
}
-
- cpp::atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
- return result;
}
// Deallocates memory by resetting its corresponding bit in the bitfield.
@@ -507,7 +505,8 @@ static cpp::Atomic<uint32_t> indices[] = {
#undef S
// Tries to find a slab in the table that can support the given chunk size.
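+// On success, 'reserved' is set to the number of chunks already claimed in
+// the returned slab; allocate() uses it as its starting search position.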
-static Slab *find_slab(uint32_t chunk_size, uint64_t &uniform) {
+static Slab *find_slab(uint32_t chunk_size, uint64_t &uniform,
+ uint32_t &reserved) {
// We start at the index of the last successful allocation for this kind.
uint32_t chunk_id = impl::get_chunk_id(chunk_size);
uint32_t start = indices[chunk_id].load(cpp::MemoryOrder::RELAXED);
@@ -520,7 +519,6 @@ static Slab *find_slab(uint32_t chunk_size, uint64_t &uniform) {
if (!offset ||
slots[index].use_count() < Slab::available_chunks(chunk_size)) {
uint64_t lane_mask = gpu::get_lane_mask();
- uint32_t reserved = 0;
Slab *slab = slots[index].try_lock(lane_mask, uniform & lane_mask,
reserved, chunk_size, index);
@@ -580,12 +578,12 @@ void *allocate(uint64_t size) {
// Try to find a slab for the rounded up chunk size and allocate from it.
uint32_t chunk_size = impl::get_chunk_size(static_cast<uint32_t>(size));
uint64_t uniform = gpu::match_any(gpu::get_lane_mask(), chunk_size);
- Slab *slab = find_slab(chunk_size, uniform);
- if (!slab || impl::is_sentinel(reinterpret_cast<uintptr_t>(slab)))
+ uint32_t reserved = 0;
+ Slab *slab = find_slab(chunk_size, uniform, reserved);
+ if (!slab)
return nullptr;
- uint64_t lane_mask = gpu::get_lane_mask();
- void *ptr = slab->allocate(lane_mask, uniform);
+ void *ptr = slab->allocate(uniform, reserved);
return ptr;
}
diff --git a/libc/src/__support/math/CMakeLists.txt b/libc/src/__support/math/CMakeLists.txt
index 95acc962..bbb07b6 100644
--- a/libc/src/__support/math/CMakeLists.txt
+++ b/libc/src/__support/math/CMakeLists.txt
@@ -158,7 +158,7 @@ add_header_library(
asinhf16
HDRS
asinhf16.h
-DEPENDS
+ DEPENDS
.acoshf_utils
libc.src.__support.FPUtil.fenv_impl
libc.src.__support.FPUtil.fp_bits
@@ -176,7 +176,7 @@ add_header_library(
atan_utils
HDRS
atan_utils.h
-DEPENDS
+ DEPENDS
libc.src.__support.integer_literals
libc.src.__support.FPUtil.double_double
libc.src.__support.FPUtil.dyadic_float
@@ -189,7 +189,21 @@ add_header_library(
atan
HDRS
atan.h
-DEPENDS
+ DEPENDS
+ .atan_utils
+ libc.src.__support.FPUtil.double_double
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.nearest_integer
+ libc.src.__support.macros.optimization
+)
+
+add_header_library(
+ atan2
+ HDRS
+ atan2.h
+ DEPENDS
.atan_utils
libc.src.__support.FPUtil.double_double
libc.src.__support.FPUtil.fenv_impl
@@ -215,6 +229,21 @@ add_header_library(
)
add_header_library(
+ atanf16
+ HDRS
+ atanf16.h
+ DEPENDS
+ libc.src.__support.FPUtil.cast
+ libc.src.__support.FPUtil.except_value_utils
+ libc.src.__support.FPUtil.fenv_impl
+ libc.src.__support.FPUtil.fp_bits
+ libc.src.__support.FPUtil.multiply_add
+ libc.src.__support.FPUtil.polyeval
+ libc.src.__support.FPUtil.sqrt
+ libc.src.__support.macros.optimization
+)
+
+add_header_library(
asinf
HDRS
asinf.h
diff --git a/libc/src/__support/math/asin_utils.h b/libc/src/__support/math/asin_utils.h
index e0c9096..efe779c 100644
--- a/libc/src/__support/math/asin_utils.h
+++ b/libc/src/__support/math/asin_utils.h
@@ -45,7 +45,7 @@ static constexpr double ASIN_COEFFS[12] = {
0x1.2b5993bda1d9bp-6, -0x1.806aff270bf25p-7, 0x1.02614e5ed3936p-5,
};
-LIBC_INLINE static constexpr double asin_eval(double u) {
+LIBC_INLINE double asin_eval(double u) {
double u2 = u * u;
double c0 = fputil::multiply_add(u, ASIN_COEFFS[1], ASIN_COEFFS[0]);
double c1 = fputil::multiply_add(u, ASIN_COEFFS[3], ASIN_COEFFS[2]);
diff --git a/libc/src/__support/math/atan2.h b/libc/src/__support/math/atan2.h
new file mode 100644
index 0000000..90ed926
--- /dev/null
+++ b/libc/src/__support/math/atan2.h
@@ -0,0 +1,209 @@
+//===-- Implementation header for atan2 -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_ATAN2_H
+#define LLVM_LIBC_SRC___SUPPORT_MATH_ATAN2_H
+
+#include "atan_utils.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/double_double.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/nearest_integer.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+// There are several range reduction steps we can take for atan2(y, x) as
+// follows:
+
+// * Range reduction 1: signedness
+// atan2(y, x) will return a number between -PI and PI representing the angle
+// formed by the 0x axis and the vector (x, y) on the 0xy-plane.
+// In particular, we have that:
+// atan2(y, x) = atan( y/x ) if x >= 0 and y >= 0 (I-quadrant)
+// = pi + atan( y/x ) if x < 0 and y >= 0 (II-quadrant)
+// = -pi + atan( y/x ) if x < 0 and y < 0 (III-quadrant)
+// = atan( y/x ) if x >= 0 and y < 0 (IV-quadrant)
+// Since the atan function is odd, we can use the formula:
+// atan(-u) = -atan(u)
+// to adjust the above conditions a bit further:
+// atan2(y, x) = atan( |y|/|x| ) if x >= 0 and y >= 0 (I-quadrant)
+// = pi - atan( |y|/|x| ) if x < 0 and y >= 0 (II-quadrant)
+// = -pi + atan( |y|/|x| ) if x < 0 and y < 0 (III-quadrant)
+// = -atan( |y|/|x| ) if x >= 0 and y < 0 (IV-quadrant)
+// Which can be simplified to:
+// atan2(y, x) = sign(y) * atan( |y|/|x| ) if x >= 0
+// = sign(y) * (pi - atan( |y|/|x| )) if x < 0
+
+// * Range reduction 2: reciprocal
+// Now that the argument inside atan is positive, we can use the formula:
+// atan(1/x) = pi/2 - atan(x)
+// to make the argument inside atan <= 1 as follows:
+// atan2(y, x) = sign(y) * atan( |y|/|x|) if 0 <= |y| <= x
+//             = sign(y) * (pi/2 - atan( |x|/|y| )) if 0 <= x < |y|
+// = sign(y) * (pi - atan( |y|/|x| )) if 0 <= |y| <= -x
+// = sign(y) * (pi/2 + atan( |x|/|y| )) if 0 <= -x < |y|
+
+// * Range reduction 3: look-up table.
+// After the previous two range reduction steps, we reduce the problem to
+// compute atan(u) with 0 <= u <= 1, or to be precise:
+// atan( n / d ) where n = min(|x|, |y|) and d = max(|x|, |y|).
+// An accurate polynomial approximation for the whole [0, 1] input range will
+// require a very large degree. To make it more efficient, we reduce the input
+// range further by finding an integer idx such that:
+// | n/d - idx/64 | <= 1/128.
+// In particular,
+// idx := round(2^6 * n/d)
+// Then for the fast pass, we find a polynomial approximation for:
+// atan( n/d ) ~ atan( idx/64 ) + (n/d - idx/64) * Q(n/d - idx/64)
+// For the accurate pass, we use the addition formula:
+// atan( n/d ) - atan( idx/64 ) = atan( (n/d - idx/64)/(1 + (n*idx)/(64*d)) )
+// = atan( (n - d*(idx/64))/(d + n*(idx/64)) )
+// And for the fast pass, we use a degree-9 Taylor polynomial to compute the RHS:
+// atan(u) ~ P(u) = u - u^3/3 + u^5/5 - u^7/7 + u^9/9
+// with absolute errors bounded by:
+// |atan(u) - P(u)| < |u|^11 / 11 < 2^-80
+// and relative errors bounded by:
+// |(atan(u) - P(u)) / P(u)| < u^10 / 11 < 2^-73.
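+
+// Worked example (illustration only): atan2(3.0, -4.0) has x < 0, y > 0 and
+// |y| < |x|, so n = 3 and d = 4:
+//   atan2(3, -4) = sign(y) * (pi - atan(3/4)) ~ 3.14159 - 0.64350 ~ 2.49809.
+// The look-up step then uses idx = round(64 * 3/4) = 48, so the polynomial
+// only has to absorb |n/d - idx/64| = 0 here (at most 1/128 in general).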
+
+LIBC_INLINE static constexpr double atan2(double y, double x) {
+ using namespace atan_internal;
+ using FPBits = fputil::FPBits<double>;
+
+ constexpr double IS_NEG[2] = {1.0, -1.0};
+ constexpr DoubleDouble ZERO = {0.0, 0.0};
+ constexpr DoubleDouble MZERO = {-0.0, -0.0};
+ constexpr DoubleDouble PI = {0x1.1a62633145c07p-53, 0x1.921fb54442d18p+1};
+ constexpr DoubleDouble MPI = {-0x1.1a62633145c07p-53, -0x1.921fb54442d18p+1};
+ constexpr DoubleDouble PI_OVER_2 = {0x1.1a62633145c07p-54,
+ 0x1.921fb54442d18p0};
+ constexpr DoubleDouble MPI_OVER_2 = {-0x1.1a62633145c07p-54,
+ -0x1.921fb54442d18p0};
+ constexpr DoubleDouble PI_OVER_4 = {0x1.1a62633145c07p-55,
+ 0x1.921fb54442d18p-1};
+ constexpr DoubleDouble THREE_PI_OVER_4 = {0x1.a79394c9e8a0ap-54,
+ 0x1.2d97c7f3321d2p+1};
+ // Adjustment for constant term:
+ // CONST_ADJ[x_sign][y_sign][recip]
+ constexpr DoubleDouble CONST_ADJ[2][2][2] = {
+ {{ZERO, MPI_OVER_2}, {MZERO, MPI_OVER_2}},
+ {{MPI, PI_OVER_2}, {MPI, PI_OVER_2}}};
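+  // e.g. x < 0, y >= 0 and recip == false selects CONST_ADJ[1][0][0] = -pi;
+  // combined with final_sign = -1 below, this yields pi - atan(|y|/|x|).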
+
+ FPBits x_bits(x), y_bits(y);
+ bool x_sign = x_bits.sign().is_neg();
+ bool y_sign = y_bits.sign().is_neg();
+ x_bits = x_bits.abs();
+ y_bits = y_bits.abs();
+ uint64_t x_abs = x_bits.uintval();
+ uint64_t y_abs = y_bits.uintval();
+ bool recip = x_abs < y_abs;
+ uint64_t min_abs = recip ? x_abs : y_abs;
+ uint64_t max_abs = !recip ? x_abs : y_abs;
+ unsigned min_exp = static_cast<unsigned>(min_abs >> FPBits::FRACTION_LEN);
+ unsigned max_exp = static_cast<unsigned>(max_abs >> FPBits::FRACTION_LEN);
+
+ double num = FPBits(min_abs).get_val();
+ double den = FPBits(max_abs).get_val();
+
+  // Check for exceptional cases: inputs that are 0, inf, nan, close to
+  // overflow, or close to underflow.
+ if (LIBC_UNLIKELY(max_exp > 0x7ffU - 128U || min_exp < 128U)) {
+ if (x_bits.is_nan() || y_bits.is_nan()) {
+ if (x_bits.is_signaling_nan() || y_bits.is_signaling_nan())
+ fputil::raise_except_if_required(FE_INVALID);
+ return FPBits::quiet_nan().get_val();
+ }
+ unsigned x_except = x == 0.0 ? 0 : (FPBits(x_abs).is_inf() ? 2 : 1);
+ unsigned y_except = y == 0.0 ? 0 : (FPBits(y_abs).is_inf() ? 2 : 1);
+
+ // Exceptional cases:
+ // EXCEPT[y_except][x_except][x_is_neg]
+ // with x_except & y_except:
+ // 0: zero
+ // 1: finite, non-zero
+ // 2: infinity
+ constexpr DoubleDouble EXCEPTS[3][3][2] = {
+ {{ZERO, PI}, {ZERO, PI}, {ZERO, PI}},
+ {{PI_OVER_2, PI_OVER_2}, {ZERO, ZERO}, {ZERO, PI}},
+ {{PI_OVER_2, PI_OVER_2},
+ {PI_OVER_2, PI_OVER_2},
+ {PI_OVER_4, THREE_PI_OVER_4}},
+ };
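+    // e.g. x == -0.0 and y == +/-0.0 selects EXCEPTS[0][0][1].hi = pi, so
+    // atan2(+0.0, -0.0) = +pi and atan2(-0.0, -0.0) = -pi, as IEEE 754
+    // specifies.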
+
+ if ((x_except != 1) || (y_except != 1)) {
+ DoubleDouble r = EXCEPTS[y_except][x_except][x_sign];
+ return fputil::multiply_add(IS_NEG[y_sign], r.hi, IS_NEG[y_sign] * r.lo);
+ }
+ bool scale_up = min_exp < 128U;
+ bool scale_down = max_exp > 0x7ffU - 128U;
+    // At least one input is denormal; multiply both numerator and denominator
+ // by some large enough power of 2 to normalize denormal inputs.
+ if (scale_up) {
+ num *= 0x1.0p64;
+ if (!scale_down)
+ den *= 0x1.0p64;
+ } else if (scale_down) {
+ den *= 0x1.0p-64;
+ if (!scale_up)
+ num *= 0x1.0p-64;
+ }
+
+ min_abs = FPBits(num).uintval();
+ max_abs = FPBits(den).uintval();
+ min_exp = static_cast<unsigned>(min_abs >> FPBits::FRACTION_LEN);
+ max_exp = static_cast<unsigned>(max_abs >> FPBits::FRACTION_LEN);
+ }
+
+ double final_sign = IS_NEG[(x_sign != y_sign) != recip];
+ DoubleDouble const_term = CONST_ADJ[x_sign][y_sign][recip];
+ unsigned exp_diff = max_exp - min_exp;
+ // We have the following bound for normalized n and d:
+ // 2^(-exp_diff - 1) < n/d < 2^(-exp_diff + 1).
+ if (LIBC_UNLIKELY(exp_diff > 54)) {
+ return fputil::multiply_add(final_sign, const_term.hi,
+ final_sign * (const_term.lo + num / den));
+ }
+
+ double k = fputil::nearest_integer(64.0 * num / den);
+ unsigned idx = static_cast<unsigned>(k);
+ // k = idx / 64
+ k *= 0x1.0p-6;
+
+ // Range reduction:
+ // atan(n/d) - atan(k/64) = atan((n/d - k/64) / (1 + (n/d) * (k/64)))
+  //                       = atan((n - d * k/64) / (d + n * k/64))
+ DoubleDouble num_k = fputil::exact_mult(num, k);
+ DoubleDouble den_k = fputil::exact_mult(den, k);
+
+ // num_dd = n - d * k
+ DoubleDouble num_dd = fputil::exact_add(num - den_k.hi, -den_k.lo);
+ // den_dd = d + n * k
+ DoubleDouble den_dd = fputil::exact_add(den, num_k.hi);
+ den_dd.lo += num_k.lo;
+
+ // q = (n - d * k) / (d + n * k)
+ DoubleDouble q = fputil::div(num_dd, den_dd);
+ // p ~ atan(q)
+ DoubleDouble p = atan_eval(q);
+
+ DoubleDouble r = fputil::add(const_term, fputil::add(ATAN_I[idx], p));
+ r.hi *= final_sign;
+ r.lo *= final_sign;
+
+ return r.hi + r.lo;
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MATH_ATAN2_H
diff --git a/libc/src/__support/math/atanf16.h b/libc/src/__support/math/atanf16.h
new file mode 100644
index 0000000..f75d145
--- /dev/null
+++ b/libc/src/__support/math/atanf16.h
@@ -0,0 +1,119 @@
+//===-- Implementation header for atanf16 -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_MATH_ATANF16_H
+#define LLVM_LIBC_SRC___SUPPORT_MATH_ATANF16_H
+
+#include "include/llvm-libc-macros/float16-macros.h"
+
+#ifdef LIBC_TYPES_HAS_FLOAT16
+
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/__support/FPUtil/FPBits.h"
+#include "src/__support/FPUtil/PolyEval.h"
+#include "src/__support/FPUtil/cast.h"
+#include "src/__support/FPUtil/except_value_utils.h"
+#include "src/__support/FPUtil/multiply_add.h"
+#include "src/__support/FPUtil/sqrt.h"
+#include "src/__support/macros/optimization.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+namespace math {
+
+LIBC_INLINE static constexpr float16 atanf16(float16 x) {
+ // Generated by Solly using the following command:
+ // > round(pi/2, SG, RN);
+  // Generated by Sollya using the following command:
+
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ constexpr size_t N_EXCEPTS = 6;
+
+ constexpr fputil::ExceptValues<float16, N_EXCEPTS> ATANF16_EXCEPTS{{
+ // (input, RZ output, RU offset, RD offset, RN offset)
+ {0x2745, 0x2744, 1, 0, 1},
+ {0x3099, 0x3090, 1, 0, 1},
+ {0x3c6c, 0x3aae, 1, 0, 1},
+ {0x466e, 0x3daa, 1, 0, 1},
+ {0x48ae, 0x3ddb, 1, 0, 0},
+ {0x5619, 0x3e3d, 1, 0, 1},
+ }};
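+  // (lookup_odd stores only |x|: it looks the value up by absolute value and
+  // negates the result when the sign bit is set, using atan's odd symmetry.)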
+#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+
+ using FPBits = fputil::FPBits<float16>;
+ FPBits xbits(x);
+
+ uint16_t x_u = xbits.uintval();
+ uint16_t x_abs = x_u & 0x7fff;
+ bool x_sign = x_u >> 15;
+ float sign = (x_sign ? -1.0 : 1.0);
+
+  // x is NaN or +/-inf
+ if (LIBC_UNLIKELY(x_abs >= 0x7c00)) {
+ if (xbits.is_nan()) {
+ if (xbits.is_signaling_nan()) {
+ fputil::raise_except_if_required(FE_INVALID);
+ return FPBits::quiet_nan().get_val();
+ }
+ return x;
+ }
+
+ // atanf16(+/-inf) = +/-pi/2
+ return fputil::cast<float16>(sign * PI_2);
+ }
+
+ float xf = x;
+ float xsq = xf * xf;
+#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
+ // Handle exceptional values
+ if (auto r = ATANF16_EXCEPTS.lookup_odd(x_abs, x_sign);
+ LIBC_UNLIKELY(r.has_value()))
+ return r.value();
+#endif
+
+  // |x| <= 0x1p0, i.e. |x| <= 1
+ if (x_abs <= 0x3c00) {
+ // atanf16(+/-0) = +/-0
+ if (LIBC_UNLIKELY(x_abs == 0))
+ return x;
+
+ // Degree-14 minimax odd polynomial of atan(x) generated by Sollya with:
+ // > P = fpminimax(atan(x)/x, [|0, 2, 4, 6, 8, 10, 12, 14|], [|SG...|],
+ // [0, 1]);
+ float result = fputil::polyeval(
+ xsq, 0x1.fffffcp-1f, -0x1.55519ep-2f, 0x1.98f6a8p-3f, -0x1.1f0a92p-3f,
+ 0x1.95b654p-4f, -0x1.e65492p-5f, 0x1.8c0c36p-6f, -0x1.32316ep-8f);
+ return fputil::cast<float16>(xf * result);
+ }
+
+ // If |x| > 1
+ // y = atan(x) = sign(x) * atan(|x|)
+ // atan(|x|) = pi/2 - atan(1/|x|)
+  // Recall that 1/|x| < 1
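+  // e.g. atan(2) = pi/2 - atan(1/2) ~ 1.5708 - 0.4636 ~ 1.1071.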
+ float x_inv_sq = 1.0f / xsq;
+ float x_inv = fputil::sqrt<float>(x_inv_sq);
+
+ // Degree-14 minimax odd polynomial of atan(x) generated by Sollya with:
+ // > P = fpminimax(atan(x)/x, [|0, 2, 4, 6, 8, 10, 12, 14|], [|SG...|],
+ // [0, 1]);
+ float interm =
+ fputil::polyeval(x_inv_sq, 0x1.fffffcp-1f, -0x1.55519ep-2f,
+ 0x1.98f6a8p-3f, -0x1.1f0a92p-3f, 0x1.95b654p-4f,
+ -0x1.e65492p-5f, 0x1.8c0c36p-6f, -0x1.32316ep-8f);
+
+ return fputil::cast<float16>(sign *
+ fputil::multiply_add(x_inv, -interm, PI_2));
+}
+
+} // namespace math
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_TYPES_HAS_FLOAT16
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MATH_ATANF16_H
diff --git a/libc/src/__support/threads/CMakeLists.txt b/libc/src/__support/threads/CMakeLists.txt
index b084346..f8a4493 100644
--- a/libc/src/__support/threads/CMakeLists.txt
+++ b/libc/src/__support/threads/CMakeLists.txt
@@ -42,6 +42,14 @@ if(TARGET libc.src.__support.threads.${LIBC_TARGET_OS}.mutex)
.mutex
libc.src.__support.CPP.mutex
)
+elseif(NOT (LIBC_CONF_THREAD_MODE STREQUAL LIBC_THREAD_MODE_PLATFORM))
+ add_header_library(
+ mutex
+ HDRS
+ mutex.h
+ DEPENDS
+ .mutex_common
+ )
endif()
add_header_library(
diff --git a/libc/src/__support/threads/gpu/CMakeLists.txt b/libc/src/__support/threads/gpu/CMakeLists.txt
deleted file mode 100644
index ea89feb..0000000
--- a/libc/src/__support/threads/gpu/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-add_header_library(
- mutex
- HDRS
- mutex.h
-)
diff --git a/libc/src/__support/threads/gpu/mutex.h b/libc/src/__support/threads/gpu/mutex.h
deleted file mode 100644
index c8c484e..0000000
--- a/libc/src/__support/threads/gpu/mutex.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===--- Implementation of a GPU mutex class --------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
-#define LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
-
-#include "src/__support/macros/attributes.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/threads/mutex_common.h"
-
-namespace LIBC_NAMESPACE_DECL {
-
-/// Implementation of a simple passthrough mutex which guards nothing. A
-/// complete Mutex locks in general cannot be implemented on the GPU. We simply
-/// define the Mutex interface and require that only a single thread executes
-/// code requiring a mutex lock.
-struct Mutex {
- LIBC_INLINE constexpr Mutex(bool, bool, bool, bool) {}
-
- LIBC_INLINE MutexError lock() { return MutexError::NONE; }
- LIBC_INLINE MutexError unlock() { return MutexError::NONE; }
- LIBC_INLINE MutexError reset() { return MutexError::NONE; }
-};
-
-} // namespace LIBC_NAMESPACE_DECL
-
-#endif // LLVM_LIBC_SRC___SUPPORT_THREADS_GPU_MUTEX_H
diff --git a/libc/src/__support/threads/mutex.h b/libc/src/__support/threads/mutex.h
index 392b389..cbef0d0 100644
--- a/libc/src/__support/threads/mutex.h
+++ b/libc/src/__support/threads/mutex.h
@@ -9,10 +9,35 @@
#ifndef LLVM_LIBC_SRC___SUPPORT_THREADS_MUTEX_H
#define LLVM_LIBC_SRC___SUPPORT_THREADS_MUTEX_H
-#include "src/__support/macros/properties/architectures.h"
+#include "src/__support/macros/attributes.h"
+#include "src/__support/macros/config.h"
+
+// Uses the platform specific specialization
+#define LIBC_THREAD_MODE_PLATFORM 0
+
+// Mutex guards nothing, used in single-threaded implementations
+#define LIBC_THREAD_MODE_SINGLE 1
+
+// Vendor provides implementation
+#define LIBC_THREAD_MODE_EXTERNAL 2
+
+#if !defined(LIBC_THREAD_MODE)
+#error LIBC_THREAD_MODE is undefined
+#endif // LIBC_THREAD_MODE
+
+#if LIBC_THREAD_MODE != LIBC_THREAD_MODE_PLATFORM && \
+ LIBC_THREAD_MODE != LIBC_THREAD_MODE_SINGLE && \
+ LIBC_THREAD_MODE != LIBC_THREAD_MODE_EXTERNAL
+#error LIBC_THREAD_MODE must be one of the following values: \
+LIBC_THREAD_MODE_PLATFORM, \
+LIBC_THREAD_MODE_SINGLE, \
+LIBC_THREAD_MODE_EXTERNAL.
+#endif
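+
+// For example, baremetal and GPU builds select the single-threaded stub by
+// setting LIBC_CONF_THREAD_MODE to LIBC_THREAD_MODE_SINGLE in their
+// config.json, which the build forwards as
+// -DLIBC_THREAD_MODE=LIBC_THREAD_MODE_SINGLE.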
+
+#if LIBC_THREAD_MODE == LIBC_THREAD_MODE_PLATFORM
// Platform independent code will include this header file which pulls
-// the platfrom specific specializations using platform macros.
+// the platform specific specializations using platform macros.
//
// The platform specific specializations should define a class by name
// Mutex with non-static methods having the following signature:
@@ -39,8 +64,32 @@
#if defined(__linux__)
#include "src/__support/threads/linux/mutex.h"
-#elif defined(LIBC_TARGET_ARCH_IS_GPU)
-#include "src/__support/threads/gpu/mutex.h"
#endif // __linux__
+#elif LIBC_THREAD_MODE == LIBC_THREAD_MODE_SINGLE
+
+#include "src/__support/threads/mutex_common.h"
+
+namespace LIBC_NAMESPACE_DECL {
+
+/// Implementation of a simple passthrough mutex which guards nothing. A
+/// complete Mutex lock in general cannot be implemented on the GPU or on some
+/// baremetal platforms. We simply define the Mutex interface and require that
+/// only a single thread executes code requiring a mutex lock.
+struct Mutex {
+ LIBC_INLINE constexpr Mutex(bool, bool, bool, bool) {}
+
+ LIBC_INLINE MutexError lock() { return MutexError::NONE; }
+ LIBC_INLINE MutexError unlock() { return MutexError::NONE; }
+ LIBC_INLINE MutexError reset() { return MutexError::NONE; }
+};
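+
+// Usage sketch (illustrative; the four constructor flags are assumed to
+// mirror the platform mutex and are all ignored here):
+//   Mutex m(false, false, false, false);
+//   if (m.lock() == MutexError::NONE) {
+//     // ... single-threaded critical section ...
+//     m.unlock();
+//   }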
+
+} // namespace LIBC_NAMESPACE_DECL
+
+#elif LIBC_THREAD_MODE == LIBC_THREAD_MODE_EXTERNAL
+
+// TODO: Implement the interfacing, if necessary, e.g. "extern struct Mutex;"
+
+#endif // LIBC_THREAD_MODE == LIBC_THREAD_MODE_PLATFORM
+
#endif // LLVM_LIBC_SRC___SUPPORT_THREADS_MUTEX_H
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 701dc4b..6bcb1e2 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -4037,17 +4037,7 @@ add_entrypoint_object(
HDRS
../atanf16.h
DEPENDS
- libc.hdr.errno_macros
- libc.hdr.fenv_macros
- libc.src.__support.FPUtil.cast
- libc.src.__support.FPUtil.except_value_utils
- libc.src.__support.FPUtil.fenv_impl
- libc.src.__support.FPUtil.fp_bits
- libc.src.__support.FPUtil.multiply_add
- libc.src.__support.FPUtil.polyeval
- libc.src.__support.FPUtil.sqrt
- libc.src.__support.macros.optimization
- libc.src.__support.macros.properties.types
+ libc.src.__support.math.atanf16
)
add_entrypoint_object(
@@ -4089,13 +4079,7 @@ add_entrypoint_object(
HDRS
../atan2.h
DEPENDS
- libc.src.__support.math.atan_utils
- libc.src.__support.FPUtil.double_double
- libc.src.__support.FPUtil.fenv_impl
- libc.src.__support.FPUtil.fp_bits
- libc.src.__support.FPUtil.multiply_add
- libc.src.__support.FPUtil.nearest_integer
- libc.src.__support.macros.optimization
+ libc.src.__support.math.atan2
)
add_entrypoint_object(
@@ -4105,7 +4089,7 @@ add_entrypoint_object(
HDRS
../atan2l.h
DEPENDS
- .atan2
+ libc.src.__support.math.atan2
)
add_entrypoint_object(
diff --git a/libc/src/math/generic/atan2.cpp b/libc/src/math/generic/atan2.cpp
index 58042d3..4aaa63d 100644
--- a/libc/src/math/generic/atan2.cpp
+++ b/libc/src/math/generic/atan2.cpp
@@ -7,195 +7,12 @@
//===----------------------------------------------------------------------===//
#include "src/math/atan2.h"
-#include "src/__support/FPUtil/FEnvImpl.h"
-#include "src/__support/FPUtil/FPBits.h"
-#include "src/__support/FPUtil/double_double.h"
-#include "src/__support/FPUtil/multiply_add.h"
-#include "src/__support/FPUtil/nearest_integer.h"
-#include "src/__support/macros/config.h"
-#include "src/__support/macros/optimization.h" // LIBC_UNLIKELY
-#include "src/__support/math/atan_utils.h"
+#include "src/__support/math/atan2.h"
namespace LIBC_NAMESPACE_DECL {
-// There are several range reduction steps we can take for atan2(y, x) as
-// follow:
-
-// * Range reduction 1: signness
-// atan2(y, x) will return a number between -PI and PI representing the angle
-// forming by the 0x axis and the vector (x, y) on the 0xy-plane.
-// In particular, we have that:
-// atan2(y, x) = atan( y/x ) if x >= 0 and y >= 0 (I-quadrant)
-// = pi + atan( y/x ) if x < 0 and y >= 0 (II-quadrant)
-// = -pi + atan( y/x ) if x < 0 and y < 0 (III-quadrant)
-// = atan( y/x ) if x >= 0 and y < 0 (IV-quadrant)
-// Since atan function is odd, we can use the formula:
-// atan(-u) = -atan(u)
-// to adjust the above conditions a bit further:
-// atan2(y, x) = atan( |y|/|x| ) if x >= 0 and y >= 0 (I-quadrant)
-// = pi - atan( |y|/|x| ) if x < 0 and y >= 0 (II-quadrant)
-// = -pi + atan( |y|/|x| ) if x < 0 and y < 0 (III-quadrant)
-// = -atan( |y|/|x| ) if x >= 0 and y < 0 (IV-quadrant)
-// Which can be simplified to:
-// atan2(y, x) = sign(y) * atan( |y|/|x| ) if x >= 0
-// = sign(y) * (pi - atan( |y|/|x| )) if x < 0
-
-// * Range reduction 2: reciprocal
-// Now that the argument inside atan is positive, we can use the formula:
-// atan(1/x) = pi/2 - atan(x)
-// to make the argument inside atan <= 1 as follow:
-// atan2(y, x) = sign(y) * atan( |y|/|x|) if 0 <= |y| <= x
-// = sign(y) * (pi/2 - atan( |x|/|y| ) if 0 <= x < |y|
-// = sign(y) * (pi - atan( |y|/|x| )) if 0 <= |y| <= -x
-// = sign(y) * (pi/2 + atan( |x|/|y| )) if 0 <= -x < |y|
-
-// * Range reduction 3: look up table.
-// After the previous two range reduction steps, we reduce the problem to
-// compute atan(u) with 0 <= u <= 1, or to be precise:
-// atan( n / d ) where n = min(|x|, |y|) and d = max(|x|, |y|).
-// An accurate polynomial approximation for the whole [0, 1] input range will
-// require a very large degree. To make it more efficient, we reduce the input
-// range further by finding an integer idx such that:
-// | n/d - idx/64 | <= 1/128.
-// In particular,
-// idx := round(2^6 * n/d)
-// Then for the fast pass, we find a polynomial approximation for:
-// atan( n/d ) ~ atan( idx/64 ) + (n/d - idx/64) * Q(n/d - idx/64)
-// For the accurate pass, we use the addition formula:
-// atan( n/d ) - atan( idx/64 ) = atan( (n/d - idx/64)/(1 + (n*idx)/(64*d)) )
-// = atan( (n - d*(idx/64))/(d + n*(idx/64)) )
-// And for the fast pass, we use degree-9 Taylor polynomial to compute the RHS:
-// atan(u) ~ P(u) = u - u^3/3 + u^5/5 - u^7/7 + u^9/9
-// with absolute errors bounded by:
-// |atan(u) - P(u)| < |u|^11 / 11 < 2^-80
-// and relative errors bounded by:
-// |(atan(u) - P(u)) / P(u)| < u^10 / 11 < 2^-73.
-
LLVM_LIBC_FUNCTION(double, atan2, (double y, double x)) {
- using namespace atan_internal;
- using FPBits = fputil::FPBits<double>;
-
- constexpr double IS_NEG[2] = {1.0, -1.0};
- constexpr DoubleDouble ZERO = {0.0, 0.0};
- constexpr DoubleDouble MZERO = {-0.0, -0.0};
- constexpr DoubleDouble PI = {0x1.1a62633145c07p-53, 0x1.921fb54442d18p+1};
- constexpr DoubleDouble MPI = {-0x1.1a62633145c07p-53, -0x1.921fb54442d18p+1};
- constexpr DoubleDouble PI_OVER_2 = {0x1.1a62633145c07p-54,
- 0x1.921fb54442d18p0};
- constexpr DoubleDouble MPI_OVER_2 = {-0x1.1a62633145c07p-54,
- -0x1.921fb54442d18p0};
- constexpr DoubleDouble PI_OVER_4 = {0x1.1a62633145c07p-55,
- 0x1.921fb54442d18p-1};
- constexpr DoubleDouble THREE_PI_OVER_4 = {0x1.a79394c9e8a0ap-54,
- 0x1.2d97c7f3321d2p+1};
- // Adjustment for constant term:
- // CONST_ADJ[x_sign][y_sign][recip]
- constexpr DoubleDouble CONST_ADJ[2][2][2] = {
- {{ZERO, MPI_OVER_2}, {MZERO, MPI_OVER_2}},
- {{MPI, PI_OVER_2}, {MPI, PI_OVER_2}}};
-
- FPBits x_bits(x), y_bits(y);
- bool x_sign = x_bits.sign().is_neg();
- bool y_sign = y_bits.sign().is_neg();
- x_bits = x_bits.abs();
- y_bits = y_bits.abs();
- uint64_t x_abs = x_bits.uintval();
- uint64_t y_abs = y_bits.uintval();
- bool recip = x_abs < y_abs;
- uint64_t min_abs = recip ? x_abs : y_abs;
- uint64_t max_abs = !recip ? x_abs : y_abs;
- unsigned min_exp = static_cast<unsigned>(min_abs >> FPBits::FRACTION_LEN);
- unsigned max_exp = static_cast<unsigned>(max_abs >> FPBits::FRACTION_LEN);
-
- double num = FPBits(min_abs).get_val();
- double den = FPBits(max_abs).get_val();
-
- // Check for exceptional cases, whether inputs are 0, inf, nan, or close to
- // overflow, or close to underflow.
- if (LIBC_UNLIKELY(max_exp > 0x7ffU - 128U || min_exp < 128U)) {
- if (x_bits.is_nan() || y_bits.is_nan()) {
- if (x_bits.is_signaling_nan() || y_bits.is_signaling_nan())
- fputil::raise_except_if_required(FE_INVALID);
- return FPBits::quiet_nan().get_val();
- }
- unsigned x_except = x == 0.0 ? 0 : (FPBits(x_abs).is_inf() ? 2 : 1);
- unsigned y_except = y == 0.0 ? 0 : (FPBits(y_abs).is_inf() ? 2 : 1);
-
- // Exceptional cases:
- // EXCEPT[y_except][x_except][x_is_neg]
- // with x_except & y_except:
- // 0: zero
- // 1: finite, non-zero
- // 2: infinity
- constexpr DoubleDouble EXCEPTS[3][3][2] = {
- {{ZERO, PI}, {ZERO, PI}, {ZERO, PI}},
- {{PI_OVER_2, PI_OVER_2}, {ZERO, ZERO}, {ZERO, PI}},
- {{PI_OVER_2, PI_OVER_2},
- {PI_OVER_2, PI_OVER_2},
- {PI_OVER_4, THREE_PI_OVER_4}},
- };
-
- if ((x_except != 1) || (y_except != 1)) {
- DoubleDouble r = EXCEPTS[y_except][x_except][x_sign];
- return fputil::multiply_add(IS_NEG[y_sign], r.hi, IS_NEG[y_sign] * r.lo);
- }
- bool scale_up = min_exp < 128U;
- bool scale_down = max_exp > 0x7ffU - 128U;
- // At least one input is denormal, multiply both numerator and denominator
- // by some large enough power of 2 to normalize denormal inputs.
- if (scale_up) {
- num *= 0x1.0p64;
- if (!scale_down)
- den *= 0x1.0p64;
- } else if (scale_down) {
- den *= 0x1.0p-64;
- if (!scale_up)
- num *= 0x1.0p-64;
- }
-
- min_abs = FPBits(num).uintval();
- max_abs = FPBits(den).uintval();
- min_exp = static_cast<unsigned>(min_abs >> FPBits::FRACTION_LEN);
- max_exp = static_cast<unsigned>(max_abs >> FPBits::FRACTION_LEN);
- }
-
- double final_sign = IS_NEG[(x_sign != y_sign) != recip];
- DoubleDouble const_term = CONST_ADJ[x_sign][y_sign][recip];
- unsigned exp_diff = max_exp - min_exp;
- // We have the following bound for normalized n and d:
- // 2^(-exp_diff - 1) < n/d < 2^(-exp_diff + 1).
- if (LIBC_UNLIKELY(exp_diff > 54)) {
- return fputil::multiply_add(final_sign, const_term.hi,
- final_sign * (const_term.lo + num / den));
- }
-
- double k = fputil::nearest_integer(64.0 * num / den);
- unsigned idx = static_cast<unsigned>(k);
- // k = idx / 64
- k *= 0x1.0p-6;
-
- // Range reduction:
- // atan(n/d) - atan(k/64) = atan((n/d - k/64) / (1 + (n/d) * (k/64)))
- // = atan((n - d * k/64)) / (d + n * k/64))
- DoubleDouble num_k = fputil::exact_mult(num, k);
- DoubleDouble den_k = fputil::exact_mult(den, k);
-
- // num_dd = n - d * k
- DoubleDouble num_dd = fputil::exact_add(num - den_k.hi, -den_k.lo);
- // den_dd = d + n * k
- DoubleDouble den_dd = fputil::exact_add(den, num_k.hi);
- den_dd.lo += num_k.lo;
-
- // q = (n - d * k) / (d + n * k)
- DoubleDouble q = fputil::div(num_dd, den_dd);
- // p ~ atan(q)
- DoubleDouble p = atan_eval(q);
-
- DoubleDouble r = fputil::add(const_term, fputil::add(ATAN_I[idx], p));
- r.hi *= final_sign;
- r.lo *= final_sign;
-
- return r.hi + r.lo;
+ return math::atan2(y, x);
}
} // namespace LIBC_NAMESPACE_DECL
diff --git a/libc/src/math/generic/atan2l.cpp b/libc/src/math/generic/atan2l.cpp
index 47a2e985..a7824c6 100644
--- a/libc/src/math/generic/atan2l.cpp
+++ b/libc/src/math/generic/atan2l.cpp
@@ -9,7 +9,7 @@
#include "src/math/atan2l.h"
#include "src/__support/common.h"
#include "src/__support/macros/properties/types.h"
-#include "src/math/atan2.h"
+#include "src/__support/math/atan2.h"
namespace LIBC_NAMESPACE_DECL {
@@ -17,7 +17,7 @@ namespace LIBC_NAMESPACE_DECL {
LLVM_LIBC_FUNCTION(long double, atan2l, (long double y, long double x)) {
#if defined(LIBC_TYPES_LONG_DOUBLE_IS_FLOAT64)
return static_cast<long double>(
- atan2(static_cast<double>(y), static_cast<double>(x)));
+ math::atan2(static_cast<double>(y), static_cast<double>(x)));
#else
#error "Extended precision is not yet supported"
#endif
diff --git a/libc/src/math/generic/atanf16.cpp b/libc/src/math/generic/atanf16.cpp
index 9b6ec65..7191c42 100644
--- a/libc/src/math/generic/atanf16.cpp
+++ b/libc/src/math/generic/atanf16.cpp
@@ -7,101 +7,10 @@
//===----------------------------------------------------------------------===//
#include "src/math/atanf16.h"
-#include "hdr/errno_macros.h"
-#include "hdr/fenv_macros.h"
-#include "src/__support/FPUtil/FEnvImpl.h"
-#include "src/__support/FPUtil/FPBits.h"
-#include "src/__support/FPUtil/PolyEval.h"
-#include "src/__support/FPUtil/cast.h"
-#include "src/__support/FPUtil/except_value_utils.h"
-#include "src/__support/FPUtil/multiply_add.h"
-#include "src/__support/FPUtil/sqrt.h"
-#include "src/__support/macros/optimization.h"
+#include "src/__support/math/atanf16.h"
namespace LIBC_NAMESPACE_DECL {
-// Generated by Sollya using the following command:
-// > round(pi/2, SG, RN);
-static constexpr float PI_2 = 0x1.921fb6p0;
-
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-static constexpr size_t N_EXCEPTS = 6;
-
-static constexpr fputil::ExceptValues<float16, N_EXCEPTS> ATANF16_EXCEPTS{{
- // (input, RZ output, RU offset, RD offset, RN offset)
- {0x2745, 0x2744, 1, 0, 1},
- {0x3099, 0x3090, 1, 0, 1},
- {0x3c6c, 0x3aae, 1, 0, 1},
- {0x466e, 0x3daa, 1, 0, 1},
- {0x48ae, 0x3ddb, 1, 0, 0},
- {0x5619, 0x3e3d, 1, 0, 1},
-}};
-#endif // !LIBC_MATH_HAS_SKIP_ACCURATE_PASS
-
-LLVM_LIBC_FUNCTION(float16, atanf16, (float16 x)) {
- using FPBits = fputil::FPBits<float16>;
- FPBits xbits(x);
-
- uint16_t x_u = xbits.uintval();
- uint16_t x_abs = x_u & 0x7fff;
- bool x_sign = x_u >> 15;
- float sign = (x_sign ? -1.0 : 1.0);
-
- // |x| >= +/-inf
- if (LIBC_UNLIKELY(x_abs >= 0x7c00)) {
- if (xbits.is_nan()) {
- if (xbits.is_signaling_nan()) {
- fputil::raise_except_if_required(FE_INVALID);
- return FPBits::quiet_nan().get_val();
- }
- return x;
- }
-
- // atanf16(+/-inf) = +/-pi/2
- return fputil::cast<float16>(sign * PI_2);
- }
-
- float xf = x;
- float xsq = xf * xf;
-#ifndef LIBC_MATH_HAS_SKIP_ACCURATE_PASS
- // Handle exceptional values
- if (auto r = ATANF16_EXCEPTS.lookup_odd(x_abs, x_sign);
- LIBC_UNLIKELY(r.has_value()))
- return r.value();
-#endif
-
-  // |x| <= 1, i.e., x_abs <= 0x3c00
- if (x_abs <= 0x3c00) {
- // atanf16(+/-0) = +/-0
- if (LIBC_UNLIKELY(x_abs == 0))
- return x;
-
- // Degree-14 minimax odd polynomial of atan(x) generated by Sollya with:
- // > P = fpminimax(atan(x)/x, [|0, 2, 4, 6, 8, 10, 12, 14|], [|SG...|],
- // [0, 1]);
- float result = fputil::polyeval(
- xsq, 0x1.fffffcp-1f, -0x1.55519ep-2f, 0x1.98f6a8p-3f, -0x1.1f0a92p-3f,
- 0x1.95b654p-4f, -0x1.e65492p-5f, 0x1.8c0c36p-6f, -0x1.32316ep-8f);
- return fputil::cast<float16>(xf * result);
- }
-
- // If |x| > 1
- // y = atan(x) = sign(x) * atan(|x|)
- // atan(|x|) = pi/2 - atan(1/|x|)
- // Recall, 1/|x| < 1
- float x_inv_sq = 1.0f / xsq;
- float x_inv = fputil::sqrt<float>(x_inv_sq);
-
- // Degree-14 minimax odd polynomial of atan(x) generated by Sollya with:
- // > P = fpminimax(atan(x)/x, [|0, 2, 4, 6, 8, 10, 12, 14|], [|SG...|],
- // [0, 1]);
- float interm =
- fputil::polyeval(x_inv_sq, 0x1.fffffcp-1f, -0x1.55519ep-2f,
- 0x1.98f6a8p-3f, -0x1.1f0a92p-3f, 0x1.95b654p-4f,
- -0x1.e65492p-5f, 0x1.8c0c36p-6f, -0x1.32316ep-8f);
-
- return fputil::cast<float16>(sign *
- fputil::multiply_add(x_inv, -interm, PI_2));
-}
+LLVM_LIBC_FUNCTION(float16, atanf16, (float16 x)) { return math::atanf16(x); }
} // namespace LIBC_NAMESPACE_DECL
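
The removed float16 code (now in src/__support/math/atanf16.h) evaluates a single polynomial on [0, 1] and maps |x| > 1 back onto that interval with atan(|x|) = pi/2 - atan(1/|x|). A minimal double-precision illustration of that reduction, with std::atan standing in for the polynomial:

#include <cmath>
#include <cstdio>

int main() {
  double x = 3.5; // any |x| > 1
  double sign = x < 0.0 ? -1.0 : 1.0;
  const double pi_over_2 = 2.0 * std::atan(1.0);
  // atan(x) = sign(x) * (pi/2 - atan(1/|x|)), and 1/|x| is back in [0, 1].
  double via_identity = sign * (pi_over_2 - std::atan(1.0 / std::fabs(x)));
  std::printf("identity: %.17g\n", via_identity);
  std::printf("atan    : %.17g\n", std::atan(x));
  return 0;
}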
diff --git a/libc/src/sched/linux/CMakeLists.txt b/libc/src/sched/linux/CMakeLists.txt
index 66ebaea..bb50002 100644
--- a/libc/src/sched/linux/CMakeLists.txt
+++ b/libc/src/sched/linux/CMakeLists.txt
@@ -18,7 +18,9 @@ add_entrypoint_object(
../sched_getaffinity.h
DEPENDS
libc.hdr.stdint_proxy
- libc.include.sched
+ libc.hdr.types.cpu_set_t
+ libc.hdr.types.pid_t
+ libc.hdr.types.size_t
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
@@ -30,7 +32,9 @@ add_entrypoint_object(
HDRS
../sched_setaffinity.h
DEPENDS
- libc.include.sched
+ libc.hdr.types.cpu_set_t
+ libc.hdr.types.pid_t
+ libc.hdr.types.size_t
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
@@ -42,7 +46,8 @@ add_entrypoint_object(
HDRS
../sched_getcpucount.h
DEPENDS
- libc.include.sched
+ libc.hdr.types.cpu_set_t
+ libc.hdr.types.size_t
)
add_entrypoint_object(
@@ -106,7 +111,7 @@ add_entrypoint_object(
HDRS
../sched_getscheduler.h
DEPENDS
- libc.include.sched
+ libc.hdr.types.pid_t
libc.include.sys_syscall
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
@@ -143,8 +148,9 @@ add_entrypoint_object(
HDRS
../sched_rr_get_interval.h
DEPENDS
+ libc.hdr.types.pid_t
+ libc.hdr.types.struct_timespec
libc.include.sys_syscall
- libc.include.sched
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
)
diff --git a/libc/src/sched/linux/sched_getaffinity.cpp b/libc/src/sched/linux/sched_getaffinity.cpp
index 4a5e91a..d652f7f7 100644
--- a/libc/src/sched/linux/sched_getaffinity.cpp
+++ b/libc/src/sched/linux/sched_getaffinity.cpp
@@ -14,7 +14,9 @@
#include "src/__support/libc_errno.h"
#include "src/__support/macros/config.h"
-#include <sched.h>
+#include "hdr/types/cpu_set_t.h"
+#include "hdr/types/pid_t.h"
+#include "hdr/types/size_t.h"
#include <sys/syscall.h> // For syscall numbers.
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/linux/sched_getcpucount.cpp b/libc/src/sched/linux/sched_getcpucount.cpp
index 7ae166e..dcc2338 100644
--- a/libc/src/sched/linux/sched_getcpucount.cpp
+++ b/libc/src/sched/linux/sched_getcpucount.cpp
@@ -12,7 +12,8 @@
#include "src/__support/common.h"
#include "src/__support/macros/config.h"
-#include <sched.h>
+#include "hdr/types/cpu_set_t.h"
+#include "hdr/types/size_t.h"
#include <stddef.h>
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/linux/sched_getscheduler.cpp b/libc/src/sched/linux/sched_getscheduler.cpp
index d8e0296..10625f2 100644
--- a/libc/src/sched/linux/sched_getscheduler.cpp
+++ b/libc/src/sched/linux/sched_getscheduler.cpp
@@ -13,6 +13,7 @@
#include "src/__support/libc_errno.h"
#include "src/__support/macros/config.h"
+#include "hdr/types/pid_t.h"
#include <sys/syscall.h> // For syscall numbers.
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/linux/sched_rr_get_interval.cpp b/libc/src/sched/linux/sched_rr_get_interval.cpp
index 5668d596b..eecbaa4 100644
--- a/libc/src/sched/linux/sched_rr_get_interval.cpp
+++ b/libc/src/sched/linux/sched_rr_get_interval.cpp
@@ -13,6 +13,8 @@
#include "src/__support/libc_errno.h"
#include "src/__support/macros/config.h"
+#include "hdr/types/pid_t.h"
+#include "hdr/types/struct_timespec.h"
#include <sys/syscall.h> // For syscall numbers.
#ifdef SYS_sched_rr_get_interval_time64
diff --git a/libc/src/sched/linux/sched_setaffinity.cpp b/libc/src/sched/linux/sched_setaffinity.cpp
index 93e930d..3c7ed91 100644
--- a/libc/src/sched/linux/sched_setaffinity.cpp
+++ b/libc/src/sched/linux/sched_setaffinity.cpp
@@ -13,7 +13,9 @@
#include "src/__support/libc_errno.h"
#include "src/__support/macros/config.h"
-#include <sched.h>
+#include "hdr/types/cpu_set_t.h"
+#include "hdr/types/pid_t.h"
+#include "hdr/types/size_t.h"
#include <sys/syscall.h> // For syscall numbers.
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/sched_getaffinity.h b/libc/src/sched/sched_getaffinity.h
index 52ec5bc..8623089 100644
--- a/libc/src/sched/sched_getaffinity.h
+++ b/libc/src/sched/sched_getaffinity.h
@@ -10,7 +10,10 @@
#define LLVM_LIBC_SRC_SCHED_SCHED_GETAFFINITY_H
#include "src/__support/macros/config.h"
-#include <sched.h>
+
+#include "hdr/types/cpu_set_t.h"
+#include "hdr/types/pid_t.h"
+#include "hdr/types/size_t.h"
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/sched_getcpucount.h b/libc/src/sched/sched_getcpucount.h
index 8f35301..0667d8c 100644
--- a/libc/src/sched/sched_getcpucount.h
+++ b/libc/src/sched/sched_getcpucount.h
@@ -10,7 +10,8 @@
#define LLVM_LIBC_SRC_SCHED_SCHED_GETCPUCOUNT_H
#include "src/__support/macros/config.h"
-#include <sched.h>
+
+#include "hdr/types/cpu_set_t.h"
#include <stddef.h>
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/sched_getscheduler.h b/libc/src/sched/sched_getscheduler.h
index d29e902..6407dbf 100644
--- a/libc/src/sched/sched_getscheduler.h
+++ b/libc/src/sched/sched_getscheduler.h
@@ -10,7 +10,8 @@
#define LLVM_LIBC_SRC_SCHED_SCHED_GETSCHEDULER_H
#include "src/__support/macros/config.h"
-#include <sched.h>
+
+#include "hdr/types/pid_t.h"
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/sched_rr_get_interval.h b/libc/src/sched/sched_rr_get_interval.h
index ff09329..4195c14 100644
--- a/libc/src/sched/sched_rr_get_interval.h
+++ b/libc/src/sched/sched_rr_get_interval.h
@@ -10,7 +10,9 @@
#define LLVM_LIBC_SRC_SCHED_SCHED_RR_GET_INTERVAL_H
#include "src/__support/macros/config.h"
-#include <sched.h>
+
+#include "hdr/types/pid_t.h"
+#include "hdr/types/struct_timespec.h"
namespace LIBC_NAMESPACE_DECL {
diff --git a/libc/src/sched/sched_setaffinity.h b/libc/src/sched/sched_setaffinity.h
index cb2303d..f6739ab 100644
--- a/libc/src/sched/sched_setaffinity.h
+++ b/libc/src/sched/sched_setaffinity.h
@@ -10,7 +10,10 @@
#define LLVM_LIBC_SRC_SCHED_SCHED_SETAFFINITY_H
#include "src/__support/macros/config.h"
-#include <sched.h>
+
+#include "hdr/types/cpu_set_t.h"
+#include "hdr/types/pid_t.h"
+#include "hdr/types/size_t.h"
namespace LIBC_NAMESPACE_DECL {
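
The recurring substitution in these sched changes follows llvm-libc's proxy-header convention: rather than pulling in all of <sched.h>, each file includes a small header that provides exactly one type. A hypothetical proxy header of that shape is sketched below; the guard names are invented, and the build macro and include paths are assumptions based on the existing headers under libc/hdr.

// Sketch of an hdr/types/pid_t.h-style proxy header (illustrative only).
#ifndef EXAMPLE_HDR_TYPES_PID_T_H
#define EXAMPLE_HDR_TYPES_PID_T_H

#ifdef LIBC_FULL_BUILD
// Full build: use the type definition shipped with llvm-libc itself.
#include "include/llvm-libc-types/pid_t.h"
#else
// Overlay build: defer to the system's definition.
#include <sys/types.h>
#endif // LIBC_FULL_BUILD

#endif // EXAMPLE_HDR_TYPES_PID_T_H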
diff --git a/libc/startup/baremetal/CMakeLists.txt b/libc/startup/baremetal/CMakeLists.txt
index 276fe33..e361000 100644
--- a/libc/startup/baremetal/CMakeLists.txt
+++ b/libc/startup/baremetal/CMakeLists.txt
@@ -1,3 +1,34 @@
+# TODO: Use generic "add_startup_object" https://github.com/llvm/llvm-project/issues/133156
+function(add_startup_object name)
+ cmake_parse_arguments(
+ "ADD_STARTUP_OBJECT"
+ "ALIAS" # Option argument
+ "SRC" # Single value arguments
+ "DEPENDS;COMPILE_OPTIONS" # Multi value arguments
+ ${ARGN}
+ )
+
+ get_fq_target_name(${name} fq_target_name)
+ if(ADD_STARTUP_OBJECT_ALIAS)
+ get_fq_deps_list(fq_dep_list ${ADD_STARTUP_OBJECT_DEPENDS})
+ add_library(${fq_target_name} ALIAS ${fq_dep_list})
+ return()
+ endif()
+
+ add_object_library(
+ ${name}
+ SRCS ${ADD_STARTUP_OBJECT_SRC}
+ COMPILE_OPTIONS ${ADD_STARTUP_OBJECT_COMPILE_OPTIONS}
+ ${ADD_STARTUP_OBJECT_UNPARSED_ARGUMENTS}
+ DEPENDS ${ADD_STARTUP_OBJECT_DEPENDS}
+ )
+ set_target_properties(
+ ${fq_target_name}
+ PROPERTIES
+ OUTPUT_NAME ${name}.o
+ )
+endfunction()
+
add_entrypoint_object(
init
SRCS
@@ -5,6 +36,8 @@ add_entrypoint_object(
DEPENDS
libc.hdr.stdint_proxy
libc.src.__support.common
+ HDRS
+ init.h
)
add_entrypoint_object(
@@ -14,4 +47,29 @@ add_entrypoint_object(
DEPENDS
libc.hdr.stdint_proxy
libc.src.__support.common
+ HDRS
+ fini.h
)
+
+if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${LIBC_TARGET_ARCHITECTURE})
+ add_subdirectory(${LIBC_TARGET_ARCHITECTURE})
+else()
+ message(WARNING "Cannot build 'crt1.o' for ${LIBC_TARGET_ARCHITECTURE} yet.")
+ return()
+endif()
+
+add_startup_object(
+ crt1
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_ARCHITECTURE}.crt1
+)
+
+add_custom_target(libc-startup)
+
+set(fq_target_name libc.startup.baremetal.${LIBC_TARGET_ARCHITECTURE}.crt1)
+add_dependencies(libc-startup ${fq_target_name})
+install(FILES $<TARGET_OBJECTS:${fq_target_name}>
+ DESTINATION ${LIBC_INSTALL_LIBRARY_DIR}
+ RENAME $<TARGET_PROPERTY:${fq_target_name},OUTPUT_NAME>
+ COMPONENT libc)
diff --git a/libc/startup/baremetal/arm/CMakeLists.txt b/libc/startup/baremetal/arm/CMakeLists.txt
new file mode 100644
index 0000000..f75bd89
--- /dev/null
+++ b/libc/startup/baremetal/arm/CMakeLists.txt
@@ -0,0 +1,16 @@
+add_startup_object(
+ crt1
+ SRC
+ start.cpp
+ DEPENDS
+ libc.src.stdlib.atexit
+ libc.src.stdlib.exit
+ libc.src.string.memcpy
+ libc.src.string.memset
+ libc.startup.baremetal.init
+ libc.startup.baremetal.fini
+ COMPILE_OPTIONS
+ -ffreestanding # To avoid compiler warnings about calling the main function.
+ -fno-builtin
+ -Wno-global-constructors # To allow vector table initialization
+)
diff --git a/libc/startup/baremetal/arm/start.cpp b/libc/startup/baremetal/arm/start.cpp
new file mode 100644
index 0000000..123efc4
--- /dev/null
+++ b/libc/startup/baremetal/arm/start.cpp
@@ -0,0 +1,92 @@
+//===-- Implementation of crt for arm -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/macros/config.h"
+#include "src/stdlib/atexit.h"
+#include "src/stdlib/exit.h"
+#include "src/string/memcpy.h"
+#include "src/string/memset.h"
+#include "startup/baremetal/fini.h"
+#include "startup/baremetal/init.h"
+
+#include <stdint.h>
+
+extern "C" {
+int main(int argc, char **argv);
+void _start();
+
+// Semihosting library initialisation if applicable. Required for printf, etc.
+[[gnu::weak]] void _platform_init() {}
+
+// These symbols are provided by the linker. The exact names are not defined by
+// a standard.
+extern uintptr_t __stack;
+extern uintptr_t __data_source[];
+extern uintptr_t __data_start[];
+extern uintptr_t __data_size[];
+extern uintptr_t __bss_start[];
+extern uintptr_t __bss_size[];
+
+// Based on
+// https://developer.arm.com/documentation/107565/0101/Use-case-examples/Generic-Information/What-is-inside-a-program-image-/Vector-table
+void NMI_Handler() {}
+void HardFault_Handler() { LIBC_NAMESPACE::exit(1); }
+void MemManage_Handler() { LIBC_NAMESPACE::exit(1); }
+void BusFault_Handler() { LIBC_NAMESPACE::exit(1); }
+void UsageFault_Handler() { LIBC_NAMESPACE::exit(1); }
+void SVC_Handler() {}
+void DebugMon_Handler() {}
+void PendSV_Handler() {}
+void SysTick_Handler() {}
+
+// Architecturally, the bottom 7 bits of VTOR are zero, meaning the vector
+// table has to be 128-byte aligned. However, an implementation can require
+// more bits to be zero, and Cortex-M23 can require up to 10, so align the
+// vector table to 1024 bytes.
+using HandlerType = void (*)(void);
+const HandlerType vector_table[]
+ __attribute__((section(".vectors"), aligned(1024), used)) = {
+ (HandlerType)&__stack, // SP
+ _start, // Reset
+ NMI_Handler, // NMI Handler
+        HardFault_Handler,  // Hard Fault Handler
+        MemManage_Handler,  // MPU Fault Handler
+ BusFault_Handler, // Bus Fault Handler
+ UsageFault_Handler, // Usage Fault Handler
+ 0, // Reserved
+ 0, // Reserved
+ 0, // Reserved
+ 0, // Reserved
+ SVC_Handler, // SVC Handler
+ DebugMon_Handler, // Debug Monitor Handler
+ 0, // Reserved
+ PendSV_Handler, // PendSV Handler
+ SysTick_Handler, // SysTick Handler
+ // Unused
+};
+} // extern "C"
+
+namespace LIBC_NAMESPACE_DECL {
+[[noreturn]] void do_start() {
+ // FIXME: set up the QEMU test environment
+
+ // Perform the equivalent of scatterloading
+ LIBC_NAMESPACE::memcpy(__data_start, __data_source, (uintptr_t)__data_size);
+ LIBC_NAMESPACE::memset(__bss_start, '\0', (uintptr_t)__bss_size);
+ __libc_init_array();
+
+ _platform_init();
+ LIBC_NAMESPACE::atexit(&__libc_fini_array);
+ LIBC_NAMESPACE::exit(main(0, 0));
+}
+} // namespace LIBC_NAMESPACE_DECL
+
+extern "C" void _start() {
+ asm volatile("mov sp, %0" : : "r"(&__stack));
+ asm volatile("bl %0" : : "X"(LIBC_NAMESPACE::do_start));
+}
diff --git a/libc/startup/baremetal/fini.cpp b/libc/startup/baremetal/fini.cpp
index 64af842..66714e2 100644
--- a/libc/startup/baremetal/fini.cpp
+++ b/libc/startup/baremetal/fini.cpp
@@ -6,17 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#include "hdr/stdint_proxy.h"
+#include "startup/baremetal/fini.h"
+
#include "src/__support/macros/config.h"
#include <stddef.h>
namespace LIBC_NAMESPACE_DECL {
-extern "C" {
-extern uintptr_t __fini_array_start[];
-extern uintptr_t __fini_array_end[];
-}
-
using FiniCallback = void(void);
extern "C" void __libc_fini_array(void) {
diff --git a/libc/startup/baremetal/fini.h b/libc/startup/baremetal/fini.h
new file mode 100644
index 0000000..74e9601
--- /dev/null
+++ b/libc/startup/baremetal/fini.h
@@ -0,0 +1,16 @@
+//===-- Implementation header of __libc_fini_array ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "hdr/stdint_proxy.h"
+
+extern "C" {
+extern uintptr_t __fini_array_start[];
+extern uintptr_t __fini_array_end[];
+
+void __libc_fini_array(void);
+} // extern "C"
diff --git a/libc/startup/baremetal/init.cpp b/libc/startup/baremetal/init.cpp
index 995609c..89065fd 100644
--- a/libc/startup/baremetal/init.cpp
+++ b/libc/startup/baremetal/init.cpp
@@ -6,19 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#include "hdr/stdint_proxy.h"
+#include "startup/baremetal/init.h"
+
#include "src/__support/macros/config.h"
#include <stddef.h>
namespace LIBC_NAMESPACE_DECL {
-extern "C" {
-extern uintptr_t __preinit_array_start[];
-extern uintptr_t __preinit_array_end[];
-extern uintptr_t __init_array_start[];
-extern uintptr_t __init_array_end[];
-}
-
using InitCallback = void(void);
extern "C" void __libc_init_array(void) {
diff --git a/libc/startup/baremetal/init.h b/libc/startup/baremetal/init.h
new file mode 100644
index 0000000..6b545db3
--- /dev/null
+++ b/libc/startup/baremetal/init.h
@@ -0,0 +1,18 @@
+//===-- Implementation header of __libc_init_array ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "hdr/stdint_proxy.h"
+
+extern "C" {
+extern uintptr_t __preinit_array_start[];
+extern uintptr_t __preinit_array_end[];
+extern uintptr_t __init_array_start[];
+extern uintptr_t __init_array_end[];
+
+void __libc_init_array(void);
+} // extern "C"
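
The extern arrays declared in init.h bound the function-pointer tables emitted by the linker; __libc_init_array's job is to call everything between them. A self-contained sketch of that semantics, where static arrays replace the linker symbols purely for illustration:

#include <cstdio>

using InitCallback = void();

static void init_a() { std::puts("init_a"); }
static void init_b() { std::puts("init_b"); }

// Stand-ins for the linker-provided __init_array_start/__init_array_end.
static InitCallback *init_table[] = {init_a, init_b};
static InitCallback **init_start = init_table;
static InitCallback **init_end = init_table + 2;

int main() {
  // Call each registered constructor in array order.
  for (InitCallback **fn = init_start; fn != init_end; ++fn)
    (*fn)();
  return 0;
}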
diff --git a/libc/test/shared/CMakeLists.txt b/libc/test/shared/CMakeLists.txt
index 6d0601f..4ed32d4 100644
--- a/libc/test/shared/CMakeLists.txt
+++ b/libc/test/shared/CMakeLists.txt
@@ -19,7 +19,9 @@ add_fp_unittest(
libc.src.__support.math.asinhf
libc.src.__support.math.asinhf16
libc.src.__support.math.atan
+ libc.src.__support.math.atan2
libc.src.__support.math.atanf
+ libc.src.__support.math.atanf16
libc.src.__support.math.erff
libc.src.__support.math.exp
libc.src.__support.math.exp10
diff --git a/libc/test/shared/shared_math_test.cpp b/libc/test/shared/shared_math_test.cpp
index 228fa42..cd72df4 100644
--- a/libc/test/shared/shared_math_test.cpp
+++ b/libc/test/shared/shared_math_test.cpp
@@ -19,6 +19,7 @@ TEST(LlvmLibcSharedMathTest, AllFloat16) {
EXPECT_FP_EQ(0x0p+0f16, LIBC_NAMESPACE::shared::acospif16(1.0f16));
EXPECT_FP_EQ(0x0p+0f16, LIBC_NAMESPACE::shared::asinf16(0.0f16));
EXPECT_FP_EQ(0x0p+0f16, LIBC_NAMESPACE::shared::asinhf16(0.0f16));
+ EXPECT_FP_EQ(0x0p+0f16, LIBC_NAMESPACE::shared::atanf16(0.0f16));
EXPECT_FP_EQ(0x1p+0f16, LIBC_NAMESPACE::shared::exp10f16(0.0f16));
@@ -61,6 +62,7 @@ TEST(LlvmLibcSharedMathTest, AllDouble) {
EXPECT_FP_EQ(0x1.921fb54442d18p+0, LIBC_NAMESPACE::shared::acos(0.0));
EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::asin(0.0));
EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::atan(0.0));
+ EXPECT_FP_EQ(0x0p+0, LIBC_NAMESPACE::shared::atan2(0.0, 0.0));
EXPECT_FP_EQ(0x1p+0, LIBC_NAMESPACE::shared::exp(0.0));
EXPECT_FP_EQ(0x1p+0, LIBC_NAMESPACE::shared::exp10(0.0));
}
diff --git a/libc/test/src/sched/CMakeLists.txt b/libc/test/src/sched/CMakeLists.txt
index 54c6d1d..f6151d0 100644
--- a/libc/test/src/sched/CMakeLists.txt
+++ b/libc/test/src/sched/CMakeLists.txt
@@ -7,7 +7,8 @@ add_libc_unittest(
SRCS
affinity_test.cpp
DEPENDS
- libc.include.sched
+ libc.hdr.types.cpu_set_t
+ libc.hdr.types.pid_t
libc.include.sys_syscall
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
@@ -34,7 +35,7 @@ add_libc_unittest(
SRCS
get_priority_test.cpp
DEPENDS
- libc.include.sched
+ libc.hdr.sched_macros
libc.src.errno.errno
libc.src.sched.sched_get_priority_min
libc.src.sched.sched_get_priority_max
@@ -78,7 +79,7 @@ add_libc_unittest(
SRCS
sched_rr_get_interval_test.cpp
DEPENDS
- libc.include.sched
+ libc.hdr.types.struct_timespec
libc.src.errno.errno
libc.src.sched.sched_getscheduler
libc.src.sched.sched_setscheduler
@@ -94,7 +95,9 @@ add_libc_unittest(
SRCS
cpu_count_test.cpp
DEPENDS
- libc.include.sched
+ libc.hdr.sched_macros
+ libc.hdr.types.cpu_set_t
+ libc.hdr.types.pid_t
libc.include.sys_syscall
libc.src.__support.OSUtil.osutil
libc.src.errno.errno
diff --git a/libc/test/src/sched/affinity_test.cpp b/libc/test/src/sched/affinity_test.cpp
index b77f22f..1c8599b 100644
--- a/libc/test/src/sched/affinity_test.cpp
+++ b/libc/test/src/sched/affinity_test.cpp
@@ -12,7 +12,8 @@
#include "src/sched/sched_setaffinity.h"
#include "test/UnitTest/ErrnoSetterMatcher.h"
-#include <sched.h>
+#include "hdr/types/cpu_set_t.h"
+#include "hdr/types/pid_t.h"
#include <sys/syscall.h>
TEST(LlvmLibcSchedAffinityTest, SmokeTest) {
diff --git a/libc/test/src/sched/cpu_count_test.cpp b/libc/test/src/sched/cpu_count_test.cpp
index 919f147..06e4fff 100644
--- a/libc/test/src/sched/cpu_count_test.cpp
+++ b/libc/test/src/sched/cpu_count_test.cpp
@@ -12,8 +12,9 @@
#include "src/sched/sched_getcpucount.h"
#include "test/UnitTest/ErrnoSetterMatcher.h"
-#include <sched.h>
-#include <sys/syscall.h>
+#include "hdr/sched_macros.h"
+#include "hdr/types/cpu_set_t.h"
+#include "hdr/types/pid_t.h"
TEST(LlvmLibcSchedCpuCountTest, SmokeTest) {
cpu_set_t mask;
diff --git a/libc/test/src/sched/get_priority_test.cpp b/libc/test/src/sched/get_priority_test.cpp
index bb41dc0b..bf4fca8 100644
--- a/libc/test/src/sched/get_priority_test.cpp
+++ b/libc/test/src/sched/get_priority_test.cpp
@@ -11,7 +11,7 @@
#include "src/sched/sched_get_priority_min.h"
#include "test/UnitTest/Test.h"
-#include <sched.h>
+#include "hdr/sched_macros.h"
TEST(LlvmLibcSchedGetPriorityTest, HandleBadPolicyTest) {
diff --git a/libc/test/src/sched/sched_rr_get_interval_test.cpp b/libc/test/src/sched/sched_rr_get_interval_test.cpp
index a0fe5ed..272cf86 100644
--- a/libc/test/src/sched/sched_rr_get_interval_test.cpp
+++ b/libc/test/src/sched/sched_rr_get_interval_test.cpp
@@ -14,7 +14,7 @@
#include "src/unistd/getuid.h"
#include "test/UnitTest/Test.h"
-#include <sched.h>
+#include "hdr/types/struct_timespec.h"
TEST(LlvmLibcSchedRRGetIntervalTest, SmokeTest) {
libc_errno = 0;
diff --git a/libclc/CMakeLists.txt b/libclc/CMakeLists.txt
index 328dfcf..5b95edc 100644
--- a/libclc/CMakeLists.txt
+++ b/libclc/CMakeLists.txt
@@ -42,6 +42,10 @@ set( LIBCLC_TARGETS_TO_BUILD "all"
option( ENABLE_RUNTIME_SUBNORMAL "Enable runtime linking of subnormal support." OFF )
+option(
+ LIBCLC_USE_SPIRV_BACKEND "Build SPIR-V targets with the SPIR-V backend." OFF
+)
+
# Top level target used to build all Libclc libraries.
add_custom_target( libclc ALL )
@@ -115,14 +119,17 @@ foreach( tool IN ITEMS clang opt llvm-as llvm-link )
endif()
endforeach()
-# llvm-spirv is an optional dependency, used to build spirv-* targets.
-# It may be provided in-tree or externally.
-if( TARGET llvm-spirv )
- get_host_tool_path( llvm-spirv LLVM_SPIRV llvm-spirv_exe llvm-spirv_target )
-else()
- find_program( LLVM_SPIRV llvm-spirv HINTS ${LLVM_TOOLS_BINARY_DIR} )
- set( llvm-spirv_exe "${LLVM_SPIRV}" )
- set( llvm-spirv_target )
+if( NOT LIBCLC_USE_SPIRV_BACKEND )
+ # llvm-spirv is an optional dependency, used to build spirv-* targets when
+ # the SPIR-V backend hasn't been requested. It may be provided in-tree or
+ # externally.
+ if( TARGET llvm-spirv )
+ get_host_tool_path( llvm-spirv LLVM_SPIRV llvm-spirv_exe llvm-spirv_target )
+ else()
+ find_program( LLVM_SPIRV llvm-spirv HINTS ${LLVM_TOOLS_BINARY_DIR} )
+ set( llvm-spirv_exe "${LLVM_SPIRV}" )
+ set( llvm-spirv_target )
+ endif()
endif()
# List of all targets. Note that some are added dynamically below.
@@ -138,22 +145,24 @@ set( LIBCLC_TARGETS_ALL
nvptx64--nvidiacl
)
-# mesa3d environment is only available since LLVM 4.0
+# The mesa3d environment is only available since LLVM 4.0
if( LLVM_PACKAGE_VERSION VERSION_GREATER_EQUAL 4.0.0 )
list( APPEND LIBCLC_TARGETS_ALL amdgcn-mesa-mesa3d )
endif()
-# spirv-mesa3d and spirv64-mesa3d targets can only be built with the (optional)
-# llvm-spirv external tool.
-if( llvm-spirv_exe )
- list( APPEND LIBCLC_TARGETS_ALL spirv-mesa3d- spirv64-mesa3d- )
+# The spirv-mesa3d and spirv64-mesa3d targets are optional and can be built
+# with either the LLVM SPIR-V backend or the external llvm-spirv tool.
+if( LIBCLC_USE_SPIRV_BACKEND OR llvm-spirv_exe )
+ list( APPEND LIBCLC_TARGETS_ALL spirv-mesa3d- spirv64-mesa3d- )
endif()
# Verify that the user hasn't requested mesa3d targets without an available
# llvm-spirv tool.
-if( "spirv-mesa3d-" IN_LIST LIBCLC_TARGETS_TO_BUILD OR "spirv64-mesa3d-" IN_LIST LIBCLC_TARGETS_TO_BUILD )
- if( NOT llvm-spirv_exe )
- message( FATAL_ERROR "SPIR-V targets requested, but spirv-tools is not installed" )
+if( spirv-mesa3d- IN_LIST LIBCLC_TARGETS_TO_BUILD
+ OR spirv64-mesa3d- IN_LIST LIBCLC_TARGETS_TO_BUILD )
+ if( NOT LIBCLC_USE_SPIRV_BACKEND AND NOT llvm-spirv_exe )
+ message( FATAL_ERROR "SPIR-V targets requested, but spirv-tools is not "
+ "installed and the SPIR-V backend has not been requested." )
endif()
endif()
diff --git a/libclc/cmake/modules/AddLibclc.cmake b/libclc/cmake/modules/AddLibclc.cmake
index 9b0e5d9..47185586 100644
--- a/libclc/cmake/modules/AddLibclc.cmake
+++ b/libclc/cmake/modules/AddLibclc.cmake
@@ -164,7 +164,9 @@ function(get_libclc_device_info)
list( GET TRIPLE 0 ARCH )
# Some targets don't have a specific device architecture to target
- if( ARG_DEVICE STREQUAL none OR ARCH STREQUAL spirv OR ARCH STREQUAL spirv64 )
+ if( ARG_DEVICE STREQUAL none
+ OR ((ARCH STREQUAL spirv OR ARCH STREQUAL spirv64)
+ AND NOT LIBCLC_USE_SPIRV_BACKEND) )
set( cpu )
set( arch_suffix "${ARG_TRIPLE}" )
else()
@@ -182,7 +184,11 @@ function(get_libclc_device_info)
# Some libclc targets are not real clang triples: return their canonical
# triples.
- if( ARCH STREQUAL spirv OR ARCH STREQUAL clspv )
+ if( ARCH STREQUAL spirv AND LIBCLC_USE_SPIRV_BACKEND )
+ set( ARG_TRIPLE "spirv32--" )
+ elseif( ARCH STREQUAL spirv64 AND LIBCLC_USE_SPIRV_BACKEND )
+ set( ARG_TRIPLE "spirv64--" )
+ elseif( ARCH STREQUAL spirv OR ARCH STREQUAL clspv )
set( ARG_TRIPLE "spir--" )
elseif( ARCH STREQUAL spirv64 OR ARCH STREQUAL clspv64 )
set( ARG_TRIPLE "spir64--" )
@@ -363,10 +369,17 @@ function(add_libclc_builtin_set)
if( ARG_ARCH STREQUAL spirv OR ARG_ARCH STREQUAL spirv64 )
set( obj_suffix ${ARG_ARCH_SUFFIX}.spv )
set( libclc_builtins_lib ${LIBCLC_OUTPUT_LIBRARY_DIR}/${obj_suffix} )
- add_custom_command( OUTPUT ${libclc_builtins_lib}
- COMMAND ${llvm-spirv_exe} ${spvflags} -o ${libclc_builtins_lib} ${builtins_link_lib}
- DEPENDS ${llvm-spirv_target} ${builtins_link_lib} ${builtins_link_lib_tgt}
- )
+ if ( LIBCLC_USE_SPIRV_BACKEND )
+ add_custom_command( OUTPUT ${libclc_builtins_lib}
+ COMMAND ${clang_exe} --target=${ARG_TRIPLE} -x ir -o ${libclc_builtins_lib} ${builtins_link_lib}
+ DEPENDS ${clang_target} ${builtins_link_lib} ${builtins_link_lib_tgt}
+ )
+ else()
+ add_custom_command( OUTPUT ${libclc_builtins_lib}
+ COMMAND ${llvm-spirv_exe} ${spvflags} -o ${libclc_builtins_lib} ${builtins_link_lib}
+ DEPENDS ${llvm-spirv_target} ${builtins_link_lib} ${builtins_link_lib_tgt}
+ )
+ endif()
else()
# Non-SPIR-V targets add an extra step to optimize the bytecode
set( builtins_opt_lib_tgt builtins.opt.${ARG_ARCH_SUFFIX} )
diff --git a/libcxx/include/__assert b/libcxx/include/__assert
index 90eaa60..a9451da 100644
--- a/libcxx/include/__assert
+++ b/libcxx/include/__assert
@@ -20,8 +20,8 @@
#define _LIBCPP_ASSERT(expression, message) \
(__builtin_expect(static_cast<bool>(expression), 1) \
? (void)0 \
- : _LIBCPP_ASSERTION_HANDLER(__FILE__ ":" _LIBCPP_TOSTRING(__LINE__) ": assertion " _LIBCPP_TOSTRING( \
- expression) " failed: " message "\n"))
+ : _LIBCPP_ASSERTION_HANDLER(__FILE__ ":" _LIBCPP_TOSTRING( \
+ __LINE__) ": libc++ Hardening assertion " _LIBCPP_TOSTRING(expression) " failed: " message "\n"))
// WARNING: __builtin_assume can currently inhibit optimizations. Only add assumptions with a clear
// optimization intent. See https://discourse.llvm.org/t/llvm-assume-blocks-optimization/71609 for a
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 549aa06..77a71b6 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -64,9 +64,21 @@
// HARDENING {
-// TODO: Remove in LLVM 21. We're making this an error to catch folks who might not have migrated.
-# ifdef _LIBCPP_ENABLE_ASSERTIONS
-# error "_LIBCPP_ENABLE_ASSERTIONS has been removed, please use _LIBCPP_HARDENING_MODE instead"
+// TODO(LLVM 23): Remove this. We're making these an error to catch folks who might not have migrated.
+// Since hardening went through several changes (many of which impacted user-facing macros),
+// we're keeping these checks around for a bit longer than usual. Failure to properly configure
+// hardening results in checks being dropped silently, which is a pretty big deal.
+# if defined(_LIBCPP_ENABLE_ASSERTIONS)
+# error "_LIBCPP_ENABLE_ASSERTIONS has been removed, please use _LIBCPP_HARDENING_MODE=<mode> instead (see docs)"
+# endif
+# if defined(_LIBCPP_ENABLE_HARDENED_MODE)
+# error "_LIBCPP_ENABLE_HARDENED_MODE has been removed, please use _LIBCPP_HARDENING_MODE=<mode> instead (see docs)"
+# endif
+# if defined(_LIBCPP_ENABLE_SAFE_MODE)
+# error "_LIBCPP_ENABLE_SAFE_MODE has been removed, please use _LIBCPP_HARDENING_MODE=<mode> instead (see docs)"
+# endif
+# if defined(_LIBCPP_ENABLE_DEBUG_MODE)
+# error "_LIBCPP_ENABLE_DEBUG_MODE has been removed, please use _LIBCPP_HARDENING_MODE=<mode> instead (see docs)"
# endif
// The library provides the macro `_LIBCPP_HARDENING_MODE` which can be set to one of the following values:
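
As an aside on the migration these #error checks enforce: selecting a mode is a matter of defining the one supported macro on the command line. A minimal example, assuming a typical invocation (mode names per the libc++ hardening documentation):

// Build with, e.g.:
//   clang++ -D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_EXTENSIVE main.cpp
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3};
  // operator[] is bounds-checked under the extensive and debug modes, so an
  // out-of-range index would trap instead of being undefined behavior.
  return v[2];
}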
diff --git a/libcxx/include/__cxx03/__math/logarithms.h b/libcxx/include/__cxx03/__math/logarithms.h
index 2547350..9b9e59a 100644
--- a/libcxx/include/__cxx03/__math/logarithms.h
+++ b/libcxx/include/__cxx03/__math/logarithms.h
@@ -58,7 +58,7 @@ inline _LIBCPP_HIDE_FROM_ABI double log10(_A1 __x) _NOEXCEPT {
inline _LIBCPP_HIDE_FROM_ABI int ilogb(float __x) _NOEXCEPT { return __builtin_ilogbf(__x); }
template <class = int>
-_LIBCPP_HIDE_FROM_ABI double ilogb(double __x) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI int ilogb(double __x) _NOEXCEPT {
return __builtin_ilogb(__x);
}
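
The fix above matters beyond style: ilogb is specified to return the unbiased exponent as an int, and declaring the double overload with a double return type silently converts the result. A quick check:

#include <cmath>
#include <cstdio>

int main() {
  // ilogb extracts the unbiased binary exponent as an int: 8.0 = 1.0 * 2^3.
  int e = std::ilogb(8.0);
  std::printf("ilogb(8.0) = %d\n", e); // prints 3
  return 0;
}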
diff --git a/libcxx/include/__hash_table b/libcxx/include/__hash_table
index 03f50d9..dacc152 100644
--- a/libcxx/include/__hash_table
+++ b/libcxx/include/__hash_table
@@ -1709,41 +1709,45 @@ void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__rehash(size_type __n) _LIBCPP_D
template <class _Tp, class _Hash, class _Equal, class _Alloc>
template <bool _UniqueKeys>
-void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__do_rehash(size_type __nbc) {
- __pointer_allocator& __npa = __bucket_list_.get_deleter().__alloc();
- __bucket_list_.reset(__nbc > 0 ? __pointer_alloc_traits::allocate(__npa, __nbc) : nullptr);
- __bucket_list_.get_deleter().size() = __nbc;
- if (__nbc > 0) {
- for (size_type __i = 0; __i < __nbc; ++__i)
- __bucket_list_[__i] = nullptr;
- __next_pointer __pp = __first_node_.__ptr();
- __next_pointer __cp = __pp->__next_;
- if (__cp != nullptr) {
- size_type __chash = std::__constrain_hash(__cp->__hash(), __nbc);
- __bucket_list_[__chash] = __pp;
- size_type __phash = __chash;
- for (__pp = __cp, void(), __cp = __cp->__next_; __cp != nullptr; __cp = __pp->__next_) {
- __chash = std::__constrain_hash(__cp->__hash(), __nbc);
- if (__chash == __phash)
- __pp = __cp;
- else {
- if (__bucket_list_[__chash] == nullptr) {
- __bucket_list_[__chash] = __pp;
- __pp = __cp;
- __phash = __chash;
- } else {
- __next_pointer __np = __cp;
- if _LIBCPP_CONSTEXPR_SINCE_CXX17 (!_UniqueKeys) {
- for (; __np->__next_ != nullptr &&
- key_eq()(__cp->__upcast()->__get_value(), __np->__next_->__upcast()->__get_value());
- __np = __np->__next_)
- ;
- }
- __pp->__next_ = __np->__next_;
- __np->__next_ = __bucket_list_[__chash]->__next_;
- __bucket_list_[__chash]->__next_ = __cp;
- }
+void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__do_rehash(size_type __bucket_count) {
+ __pointer_allocator& __ptr_alloc = __bucket_list_.get_deleter().__alloc();
+ __bucket_list_.reset(__bucket_count > 0 ? __pointer_alloc_traits::allocate(__ptr_alloc, __bucket_count) : nullptr);
+ __bucket_list_.get_deleter().size() = __bucket_count;
+
+ if (__bucket_count == 0)
+ return;
+
+ for (size_type __i = 0; __i < __bucket_count; ++__i)
+ __bucket_list_[__i] = nullptr;
+ __next_pointer __pp = __first_node_.__ptr();
+ __next_pointer __cp = __pp->__next_;
+
+ if (!__cp)
+ return;
+
+ size_type __chash = std::__constrain_hash(__cp->__hash(), __bucket_count);
+ __bucket_list_[__chash] = __pp;
+ size_type __phash = __chash;
+ for (__pp = __cp, void(), __cp = __cp->__next_; __cp != nullptr; __cp = __pp->__next_) {
+ __chash = std::__constrain_hash(__cp->__hash(), __bucket_count);
+ if (__chash == __phash)
+ __pp = __cp;
+ else {
+ if (__bucket_list_[__chash] == nullptr) {
+ __bucket_list_[__chash] = __pp;
+ __pp = __cp;
+ __phash = __chash;
+ } else {
+ __next_pointer __np = __cp;
+ if _LIBCPP_CONSTEXPR (!_UniqueKeys) {
+ for (; __np->__next_ != nullptr &&
+ key_eq()(__cp->__upcast()->__get_value(), __np->__next_->__upcast()->__get_value());
+ __np = __np->__next_)
+ ;
}
+ __pp->__next_ = __np->__next_;
+ __np->__next_ = __bucket_list_[__chash]->__next_;
+ __bucket_list_[__chash]->__next_ = __cp;
}
}
}
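
The restructured __do_rehash keeps libc++'s invariant that every element lives on one singly linked list while each bucket stores a pointer to the node *preceding* the first element of its chain, so rehashing relinks nodes instead of copying them. A much-simplified, self-contained sketch of that relinking (plain % stands in for __constrain_hash, and the unique-key/equal-key subtlety is omitted):

#include <cstdio>
#include <vector>

struct Node { unsigned hash; Node *next; };

int main() {
  Node n3{3, nullptr}, n2{2, &n3}, n1{1, &n2};
  Node head{0, &n1}; // sentinel before the first element, like __first_node_
  const unsigned bucket_count = 2;
  std::vector<Node *> buckets(bucket_count, nullptr); // predecessor pointers

  for (Node *prev = &head; prev->next;) {
    unsigned b = prev->next->hash % bucket_count;
    if (buckets[b] == nullptr) {
      buckets[b] = prev; // bucket b's chain starts right after prev
      prev = prev->next;
    } else {
      // Splice prev->next to the front of bucket b's existing chain.
      Node *moved = prev->next;
      prev->next = moved->next;
      moved->next = buckets[b]->next;
      buckets[b]->next = moved;
    }
  }
  for (Node *p = head.next; p; p = p->next)
    std::printf("%u ", p->hash); // nodes now grouped by bucket: 3 1 2
  std::printf("\n");
  return 0;
}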
diff --git a/libcxx/include/__math/logarithms.h b/libcxx/include/__math/logarithms.h
index 5f5f943..7343d6a 100644
--- a/libcxx/include/__math/logarithms.h
+++ b/libcxx/include/__math/logarithms.h
@@ -58,7 +58,7 @@ inline _LIBCPP_HIDE_FROM_ABI double log10(_A1 __x) _NOEXCEPT {
inline _LIBCPP_HIDE_FROM_ABI int ilogb(float __x) _NOEXCEPT { return __builtin_ilogbf(__x); }
template <class = int>
-_LIBCPP_HIDE_FROM_ABI double ilogb(double __x) _NOEXCEPT {
+_LIBCPP_HIDE_FROM_ABI int ilogb(double __x) _NOEXCEPT {
return __builtin_ilogb(__x);
}
diff --git a/libcxx/include/tuple b/libcxx/include/tuple
index 1623702..1946034 100644
--- a/libcxx/include/tuple
+++ b/libcxx/include/tuple
@@ -448,33 +448,28 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 void __swallow(_Tp&&...) _NO
template <class _Indx, class... _Tp>
struct __tuple_impl;
+struct __forward_args {};
+struct __value_init {};
+
template <size_t... _Indx, class... _Tp>
struct _LIBCPP_DECLSPEC_EMPTY_BASES
__tuple_impl<__index_sequence<_Indx...>, _Tp...> : public __tuple_leaf<_Indx, _Tp>... {
_LIBCPP_HIDE_FROM_ABI constexpr __tuple_impl() noexcept(
__all<is_nothrow_default_constructible<_Tp>::value...>::value) {}
- template <size_t... _Uf, class... _Tf, size_t... _Ul, class... _Tl, class... _Up>
- _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __tuple_impl(
- __index_sequence<_Uf...>,
- __tuple_types<_Tf...>,
- __index_sequence<_Ul...>,
- __tuple_types<_Tl...>,
- _Up&&... __u) noexcept(__all<is_nothrow_constructible<_Tf, _Up>::value...>::value &&
- __all<is_nothrow_default_constructible<_Tl>::value...>::value)
- : __tuple_leaf<_Uf, _Tf>(std::forward<_Up>(__u))..., __tuple_leaf<_Ul, _Tl>()... {}
-
- template <class _Alloc, size_t... _Uf, class... _Tf, size_t... _Ul, class... _Tl, class... _Up>
+ template <class... _Args>
+ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __tuple_impl(__forward_args, _Args&&... __args)
+ : __tuple_leaf<_Indx, _Tp>(std::forward<_Args>(__args))... {}
+
+ template <class _Alloc>
+ _LIBCPP_HIDE_FROM_ABI
+ _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __tuple_impl(allocator_arg_t, const _Alloc& __alloc, __value_init)
+ : __tuple_leaf<_Indx, _Tp>(__uses_alloc_ctor<_Tp, _Alloc>(), __alloc)... {}
+
+ template <class _Alloc, class... _Args>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __tuple_impl(
- allocator_arg_t,
- const _Alloc& __a,
- __index_sequence<_Uf...>,
- __tuple_types<_Tf...>,
- __index_sequence<_Ul...>,
- __tuple_types<_Tl...>,
- _Up&&... __u)
- : __tuple_leaf<_Uf, _Tf>(__uses_alloc_ctor<_Tf, _Alloc, _Up>(), __a, std::forward<_Up>(__u))...,
- __tuple_leaf<_Ul, _Tl>(__uses_alloc_ctor<_Tl, _Alloc>(), __a)... {}
+ allocator_arg_t, const _Alloc& __alloc, __forward_args, _Args&&... __args)
+ : __tuple_leaf<_Indx, _Tp>(__uses_alloc_ctor<_Tp, _Alloc, _Args>(), __alloc, std::forward<_Args>(__args))... {}
template <class _Tuple, __enable_if_t<__tuple_constructible<_Tuple, tuple<_Tp...> >::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __tuple_impl(_Tuple&& __t) noexcept(
@@ -559,12 +554,7 @@ public:
__enable_if_t< _And< _IsDefault<_Tp>... >::value, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit(_Not<_Lazy<_And, _IsImpDefault<_Tp>...> >::value)
tuple(allocator_arg_t, _Alloc const& __a)
- : __base_(allocator_arg_t(),
- __a,
- __index_sequence<>(),
- __tuple_types<>(),
- __make_index_sequence<sizeof...(_Tp)>(),
- __tuple_types<_Tp...>()) {}
+ : __base_(allocator_arg_t(), __a, __value_init{}) {}
// tuple(const T&...) constructors (including allocator_arg_t variants)
template <template <class...> class _And = _And,
@@ -572,11 +562,7 @@ public:
_LIBCPP_HIDE_FROM_ABI
_LIBCPP_CONSTEXPR_SINCE_CXX14 explicit(_Not<_Lazy<_And, is_convertible<const _Tp&, _Tp>...> >::value)
tuple(const _Tp&... __t) noexcept(_And<is_nothrow_copy_constructible<_Tp>...>::value)
- : __base_(__make_index_sequence<sizeof...(_Tp)>(),
- __tuple_types<_Tp...>(),
- __index_sequence<>(),
- __tuple_types<>(),
- __t...) {}
+ : __base_(__forward_args{}, __t...) {}
template <class _Alloc,
template <class...> class _And = _And,
@@ -584,13 +570,7 @@ public:
_LIBCPP_HIDE_FROM_ABI
_LIBCPP_CONSTEXPR_SINCE_CXX20 explicit(_Not<_Lazy<_And, is_convertible<const _Tp&, _Tp>...> >::value)
tuple(allocator_arg_t, const _Alloc& __a, const _Tp&... __t)
- : __base_(allocator_arg_t(),
- __a,
- __make_index_sequence<sizeof...(_Tp)>(),
- __tuple_types<_Tp...>(),
- __index_sequence<>(),
- __tuple_types<>(),
- __t...) {}
+ : __base_(allocator_arg_t(), __a, __forward_args{}, __t...) {}
// tuple(U&& ...) constructors (including allocator_arg_t variants)
template <class... _Up>
@@ -609,11 +589,7 @@ public:
int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit(_Not<_Lazy<_And, is_convertible<_Up, _Tp>...> >::value)
tuple(_Up&&... __u) noexcept(_And<is_nothrow_constructible<_Tp, _Up>...>::value)
- : __base_(__make_index_sequence<sizeof...(_Up)>(),
- __tuple_types<_Tp...>(),
- __index_sequence<>(),
- __tuple_types<>(),
- std::forward<_Up>(__u)...) {}
+ : __base_(__forward_args{}, std::forward<_Up>(__u)...) {}
template <class _Alloc,
class... _Up,
@@ -621,13 +597,7 @@ public:
int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit(_Not<_Lazy<_And, is_convertible<_Up, _Tp>...> >::value)
tuple(allocator_arg_t, const _Alloc& __a, _Up&&... __u)
- : __base_(allocator_arg_t(),
- __a,
- __make_index_sequence<sizeof...(_Up)>(),
- __tuple_types<_Tp...>(),
- __index_sequence<>(),
- __tuple_types<>(),
- std::forward<_Up>(__u)...) {}
+ : __base_(allocator_arg_t(), __a, __forward_args{}, std::forward<_Up>(__u)...) {}
// Copy and move constructors (including the allocator_arg_t variants)
tuple(const tuple&) = default;
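
The simplification above replaces the paired index-sequence/type-list constructor protocol with two tag types that select how the leaves are initialized. A stripped-down sketch of that tag-dispatch pattern (names invented for the example, not libc++'s internals):

#include <cstdio>
#include <utility>

struct forward_args_tag {};
struct value_init_tag {};

template <class T>
struct leaf {
  T value;
  leaf(value_init_tag) : value() {} // value-initialize the element
  template <class U>
  leaf(forward_args_tag, U &&u) : value(std::forward<U>(u)) {} // forward one arg
};

template <class... Ts>
struct simple_tuple : leaf<Ts>... {
  simple_tuple(value_init_tag t) : leaf<Ts>(t)... {}
  template <class... Us>
  simple_tuple(forward_args_tag t, Us &&...us)
      : leaf<Ts>(t, std::forward<Us>(us))... {} // one argument per leaf
};

int main() {
  simple_tuple<int, double> a{value_init_tag{}};
  simple_tuple<int, double> b{forward_args_tag{}, 42, 3.14};
  std::printf("a: %d %g\n", static_cast<leaf<int> &>(a).value,
              static_cast<leaf<double> &>(a).value); // 0 0 (value-initialized)
  std::printf("b: %d %g\n", static_cast<leaf<int> &>(b).value,
              static_cast<leaf<double> &>(b).value); // 42 3.14
  return 0;
}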
diff --git a/libcxx/test/support/check_assertion.h b/libcxx/test/support/check_assertion.h
index a091043..8416de7 100644
--- a/libcxx/test/support/check_assertion.h
+++ b/libcxx/test/support/check_assertion.h
@@ -52,8 +52,8 @@ MatchResult MatchAssertionMessage(const std::string& text, std::string_view expe
// library.
std::string assertion_format_string = [&] {
if (use_marker)
- return (".*###\\n(.*):(\\d+): assertion (.*) failed: (.*)\\n###");
- return ("(.*):(\\d+): assertion (.*) failed: (.*)\\n");
+ return (".*###\\n(.*):(\\d+): libc\\+\\+ Hardening assertion (.*) failed: (.*)\\n###");
+ return ("(.*):(\\d+): libc\\+\\+ Hardening assertion (.*) failed: (.*)\\n");
}();
std::regex assertion_format(assertion_format_string);
diff --git a/libsycl/.clang-format b/libsycl/.clang-format
new file mode 100644
index 0000000..24550f8
--- /dev/null
+++ b/libsycl/.clang-format
@@ -0,0 +1,4 @@
+BasedOnStyle: LLVM
+
+# Preferred indentation of preprocessor statements.
+IndentPPDirectives: AfterHash
diff --git a/libsycl/.clang-tidy b/libsycl/.clang-tidy
new file mode 100644
index 0000000..cdc7a29
--- /dev/null
+++ b/libsycl/.clang-tidy
@@ -0,0 +1,17 @@
+Checks: >
+ -*,
+ clang-analyzer-*,
+ clang-diagnostic-*,
+ cppcoreguidelines-*,
+ -cppcoreguidelines-pro-bounds-array-to-pointer-decay,
+ -cppcoreguidelines-pro-bounds-constant-array-index,
+ -cppcoreguidelines-pro-bounds-pointer-arithmetic,
+ -cppcoreguidelines-pro-type-member-init,
+ -cppcoreguidelines-pro-type-union-access,
+ google-*,
+ -google-build-using-namespace,
+ -google-explicit-constructor,
+ -google-runtime-references,
+ misc-*,
+ -misc-macro-parentheses,
+ -misc-unused-parameters
diff --git a/libsycl/CMakeLists.txt b/libsycl/CMakeLists.txt
new file mode 100644
index 0000000..fe08a42
--- /dev/null
+++ b/libsycl/CMakeLists.txt
@@ -0,0 +1,126 @@
+#===============================================================================
+# Setup Project
+#===============================================================================
+cmake_minimum_required(VERSION 3.20.0)
+
+set(LLVM_SUBPROJECT_TITLE "libsycl")
+
+set(LIBSYCL_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+set(LIBSYCL_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED YES)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+#===============================================================================
+# Limitations
+#===============================================================================
+
+if (CMAKE_SYSTEM_NAME STREQUAL Windows AND NOT MSVC)
+# Building with other compilers is not configured, guaranteed, or tested.
+ message(FATAL_ERROR
+ "When compiling for Windows, libsycl requires a"
+ " version of Microsoft Visual C++ or another compiler"
+ " that uses the Visual C++ cl command-line syntax.")
+endif()
+
+#===============================================================================
+# Setup CMake Options
+#===============================================================================
+
+option(LIBSYCL_ENABLE_WERROR "Treat all warnings as errors in the libsycl project" OFF)
+option(LIBSYCL_ENABLE_PEDANTIC "Compile with pedantic enabled." OFF)
+
+#===============================================================================
+# Configure System
+#===============================================================================
+
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+set(LIBSYCL_SHARED_OUTPUT_NAME "sycl" CACHE STRING "Output name for the shared libsycl runtime library.")
+
+if(LLVM_ENABLE_PER_TARGET_RUNTIME_DIR AND NOT APPLE)
+ set(LIBSYCL_TARGET_SUBDIR ${LLVM_DEFAULT_TARGET_TRIPLE})
+ if(LIBSYCL_LIBDIR_SUBDIR)
+ string(APPEND LIBSYCL_TARGET_SUBDIR /${LIBSYCL_LIBDIR_SUBDIR})
+ endif()
+ cmake_path(NORMAL_PATH LIBSYCL_TARGET_SUBDIR)
+ set(LIBSYCL_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR}/${LIBSYCL_TARGET_SUBDIR})
+ set(LIBSYCL_INSTALL_LIBRARY_DIR lib${LLVM_LIBDIR_SUFFIX}/${LIBSYCL_TARGET_SUBDIR} CACHE STRING
+ "Path where built libsycl libraries should be installed.")
+ unset(LIBSYCL_TARGET_SUBDIR)
+else()
+ if(LLVM_LIBRARY_OUTPUT_INTDIR)
+ set(LIBSYCL_LIBRARY_DIR ${LLVM_LIBRARY_OUTPUT_INTDIR})
+ else()
+ set(LIBSYCL_LIBRARY_DIR ${CMAKE_BINARY_DIR}/lib${LIBSYCL_LIBDIR_SUFFIX})
+ endif()
+ set(LIBSYCL_INSTALL_LIBRARY_DIR lib${LIBSYCL_LIBDIR_SUFFIX} CACHE STRING
+ "Path where built libsycl libraries should be installed.")
+endif()
+
+set(LIBSYCL_INCLUDE_DIR include)
+set(LIBSYCL_BUILD_INCLUDE_DIR ${LLVM_BINARY_DIR}/${LIBSYCL_INCLUDE_DIR})
+set(LIBSYCL_SOURCE_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include)
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LIBSYCL_LIBRARY_DIR})
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LIBSYCL_LIBRARY_DIR})
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${LIBSYCL_LIBRARY_DIR})
+
+set(LIBSYCL_MAJOR_VERSION 0)
+set(LIBSYCL_MINOR_VERSION 1)
+set(LIBSYCL_PATCH_VERSION 0)
+set(LIBSYCL_VERSION_STRING "${LIBSYCL_MAJOR_VERSION}.${LIBSYCL_MINOR_VERSION}.${LIBSYCL_PATCH_VERSION}")
+set(LIBSYCL_ABI_NAMESPACE "__V${LIBSYCL_MAJOR_VERSION}" CACHE STRING
+ "The inline ABI namespace used by libsycl. It defaults to __Vn where `n` is the current ABI version.")
+if (NOT LIBSYCL_ABI_NAMESPACE MATCHES "__V.*")
+ message(FATAL_ERROR "LIBSYCL_ABI_NAMESPACE must be a reserved identifier, got '${LIBSYCL_ABI_NAMESPACE}'.")
+endif()
+
+#===============================================================================
+# Setup build & install rules
+#===============================================================================
+
+# Generate headers
+configure_file("${LIBSYCL_SOURCE_DIR}/src/version.hpp.in" "${LIBSYCL_BUILD_INCLUDE_DIR}/sycl/__impl/version.hpp")
+
+# Install generated headers.
+install(FILES
+ "${LIBSYCL_BUILD_INCLUDE_DIR}/sycl/__impl/version.hpp"
+ DESTINATION "${LIBSYCL_INCLUDE_DIR}/sycl/__impl"
+ COMPONENT sycl-headers)
+
+# This is a workaround to detect changes (additions or modifications) in the
+# subtree that are not detected by the copy_directory command.
+file(GLOB_RECURSE HEADERS_IN_SYCL_DIR CONFIGURE_DEPENDS "${LIBSYCL_SOURCE_INCLUDE_DIR}/sycl/*")
+file(GLOB_RECURSE HEADERS_IN_CL_DIR CONFIGURE_DEPENDS "${LIBSYCL_SOURCE_INCLUDE_DIR}/CL/*")
+
+string(REPLACE "${LIBSYCL_SOURCE_INCLUDE_DIR}" "${LIBSYCL_BUILD_INCLUDE_DIR}"
+ OUT_HEADERS_IN_SYCL_DIR "${HEADERS_IN_SYCL_DIR}")
+string(REPLACE "${LIBSYCL_SOURCE_INCLUDE_DIR}/CL" "${LIBSYCL_BUILD_INCLUDE_DIR}/CL"
+ OUT_HEADERS_IN_CL_DIR "${HEADERS_IN_CL_DIR}")
+
+# Copy SYCL headers from sources to build directory
+add_custom_target(sycl-headers
+ DEPENDS ${OUT_HEADERS_IN_SYCL_DIR}
+ ${OUT_HEADERS_IN_CL_DIR})
+
+add_custom_command(
+ OUTPUT ${OUT_HEADERS_IN_SYCL_DIR}
+ ${OUT_HEADERS_IN_CL_DIR}
+ DEPENDS ${HEADERS_IN_SYCL_DIR}
+ ${HEADERS_IN_CL_DIR}
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBSYCL_SOURCE_INCLUDE_DIR}/sycl ${LIBSYCL_BUILD_INCLUDE_DIR}/sycl
+ COMMAND ${CMAKE_COMMAND} -E copy_directory ${LIBSYCL_SOURCE_INCLUDE_DIR}/CL ${LIBSYCL_BUILD_INCLUDE_DIR}/CL
+ COMMENT "Copying SYCL headers...")
+
+install(DIRECTORY "${LIBSYCL_SOURCE_INCLUDE_DIR}/sycl" DESTINATION ${LIBSYCL_INCLUDE_DIR} COMPONENT sycl-headers)
+install(DIRECTORY "${LIBSYCL_SOURCE_INCLUDE_DIR}/CL" DESTINATION ${LIBSYCL_INCLUDE_DIR} COMPONENT sycl-headers)
+
+set(LIBSYCL_RT_LIBS ${LIBSYCL_SHARED_OUTPUT_NAME})
+
+add_subdirectory(src)
+
+add_custom_target(libsycl-runtime-libraries
+ DEPENDS ${LIBSYCL_RT_LIBS}
+)
diff --git a/libsycl/LICENSE.txt b/libsycl/LICENSE.txt
new file mode 100644
index 0000000..5715176
--- /dev/null
+++ b/libsycl/LICENSE.txt
@@ -0,0 +1,278 @@
+==============================================================================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+==============================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+==============================================================================
+Software from third parties included in the LLVM Project:
+==============================================================================
+The LLVM Project contains third party software which is under different license
+terms. All such code will be identified clearly using at least one of two
+mechanisms:
+1) It will be in a separate directory tree with its own `LICENSE.txt` or
+ `LICENSE` file at the top containing the specific license and restrictions
+ which apply to that software, or
+2) It will contain specific license and restriction terms at the top of every
+ file.
+
+==============================================================================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
diff --git a/libsycl/README.md b/libsycl/README.md
new file mode 100644
index 0000000..9fed3fd
--- /dev/null
+++ b/libsycl/README.md
@@ -0,0 +1,20 @@
+## SYCL runtime library
+
+The libsycl subproject is an implementation of the SYCL runtime library as defined by the
+[SYCL 2020 specification](https://registry.khronos.org/SYCL/specs/sycl-2020/html/sycl-2020.html).
+
+Subproject documentation is available at: [SYCL RT documentation](./docs).
+
+The libsycl runtime library and headers require C++17 or later.
+
+### How to use libsycl with Clang
+
+TODO
+
+#### How to build
+
+TODO
+
+## License
+
+See [LICENSE](./LICENSE.TXT) for details.
diff --git a/libsycl/docs/index.rst b/libsycl/docs/index.rst
new file mode 100644
index 0000000..78e76e7
--- /dev/null
+++ b/libsycl/docs/index.rst
@@ -0,0 +1,99 @@
+===========================
+SYCL runtime implementation
+===========================
+
+.. contents::
+ :local:
+
+.. _index:
+
+Current Status
+==============
+
+The implementation is in the very early stages of upstreaming. The first milestone is to get
+support for a simple SYCL application with device code using Unified Shared Memory:
+
+.. code-block:: c++
+
+ #include <sycl/sycl.hpp>
+
+ class TestKernel;
+
+ int main() {
+ sycl::queue q;
+
+    const int dataSize = 32;
+    int *dataPtr = sycl::malloc_shared<int>(dataSize, q);
+ for (int i = 0; i < dataSize; ++i)
+ dataPtr[i] = 0;
+
+ q.submit([&](sycl::handler &cgh) {
+ cgh.parallel_for<TestKernel>(
+ sycl::range<1>(dataSize),
+ [=](sycl::id<1> idx) { dataPtr[idx] = idx[0]; });
+ });
+ q.wait();
+
+ bool error = false;
+ for (int i = 0; i < dataSize; ++i)
+ if (dataPtr[i] != i) error = true;
+
+    sycl::free(dataPtr, q);
+
+ return error;
+ }
+
+This requires at least partial support of the following functionality on the libsycl side:
+ * ``sycl::platform`` class
+ * ``sycl::device`` class
+ * ``sycl::context`` class
+ * ``sycl::queue`` class
+ * ``sycl::handler`` class
+ * ``sycl::id`` and ``sycl::range`` classes
+ * Unified shared memory allocation/deallocation
+ * Program manager, an internal component for retrieving and using device images from the multi-architectural binaries
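+
+Of the functionality above, this initial patch adds only the ``sycl::platform``
+class, and its constructor is currently a stub that throws. A minimal sketch of
+what can be exercised today (expected to fail at runtime until platform support
+lands):
+
+.. code-block:: c++
+
+  #include <exception>
+  #include <sycl/sycl.hpp>
+
+  int main() {
+    try {
+      sycl::platform p;
+    } catch (const std::exception &) {
+      // Expected for now: platform() throws "Unimplemented".
+      return 0;
+    }
+    return 1;
+  }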
+
+Build steps
+===========
+
+To build LLVM with the libsycl runtime enabled, the following script can be used.
+
+.. code-block:: console
+
+ #!/bin/sh
+
+ build_llvm=`pwd`/build-llvm
+ installprefix=`pwd`/install
+ llvm=`pwd`
+ mkdir -p $build_llvm
+ mkdir -p $installprefix
+
+ cmake -G Ninja -S $llvm/llvm -B $build_llvm \
+ -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra" \
+ -DLLVM_INSTALL_UTILS=ON \
+ -DCMAKE_INSTALL_PREFIX=$installprefix \
+ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libsycl;libunwind" \
+ -DCMAKE_BUILD_TYPE=Release
+
+ ninja -C $build_llvm install
+ \ No newline at end of file
diff --git a/libsycl/include/CL/sycl.hpp b/libsycl/include/CL/sycl.hpp
new file mode 100644
index 0000000..7c5de57
--- /dev/null
+++ b/libsycl/include/CL/sycl.hpp
@@ -0,0 +1,30 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a SYCL 1.2.1 standard header file. Deprecated.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBSYCL_CL_SYCL_HPP
+#define _LIBSYCL_CL_SYCL_HPP
+
+#if defined(__clang__)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wpedantic"
+# warning "CL/sycl.hpp is deprecated, use sycl/sycl.hpp"
+# pragma clang diagnostic pop
+#endif
+
+#include <sycl/sycl.hpp>
+
+namespace cl {
+namespace sycl = ::sycl;
+}
+
+#endif // _LIBSYCL_CL_SYCL_HPP
diff --git a/libsycl/include/sycl/__impl/detail/config.hpp b/libsycl/include/sycl/__impl/detail/config.hpp
new file mode 100644
index 0000000..cc90597
--- /dev/null
+++ b/libsycl/include/sycl/__impl/detail/config.hpp
@@ -0,0 +1,72 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the macros defining attributes for
+/// exported methods and defining API namespaces.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBSYCL___IMPL_DETAIL_CONFIG_HPP
+#define _LIBSYCL___IMPL_DETAIL_CONFIG_HPP
+
+#include <sycl/__impl/version.hpp>
+
+#define _LIBSYCL_BEGIN_UNVERSIONED_NAMESPACE_SYCL namespace sycl {
+#define _LIBSYCL_END_UNVERSIONED_NAMESPACE_SYCL }
+
+#define _LIBSYCL_BEGIN_NAMESPACE_SYCL \
+ _LIBSYCL_BEGIN_UNVERSIONED_NAMESPACE_SYCL inline namespace _LIBSYCL_ABI_NAMESPACE {
+#define _LIBSYCL_END_NAMESPACE_SYCL \
+ } \
+ _LIBSYCL_END_UNVERSIONED_NAMESPACE_SYCL
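+
+// For example, assuming _LIBSYCL_ABI_NAMESPACE expands to V1, a declaration
+// wrapped as
+//
+//   _LIBSYCL_BEGIN_NAMESPACE_SYCL
+//   class platform;
+//   _LIBSYCL_END_NAMESPACE_SYCL
+//
+// becomes
+//
+//   namespace sycl { inline namespace V1 {
+//   class platform;
+//   }} // namespace sycl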
+
+#ifndef __SYCL_DEVICE_ONLY__
+
+# ifndef _LIBSYCL_EXPORT
+# ifdef _WIN32
+
+# define _LIBSYCL_DLL_LOCAL
+
+# ifdef _LIBSYCL_BUILDING_LIBRARY
+# define _LIBSYCL_EXPORT __declspec(dllexport)
+# else
+# define _LIBSYCL_EXPORT __declspec(dllimport)
+# endif //_LIBSYCL_BUILDING_LIBRARY
+
+# else // _WIN32
+
+# define _LIBSYCL_DLL_LOCAL [[__gnu__::__visibility__("hidden")]]
+# define _LIBSYCL_EXPORT [[__gnu__::__visibility__("default")]]
+
+# endif // _WIN32
+# endif // _LIBSYCL_EXPORT
+
+#else // __SYCL_DEVICE_ONLY__
+
+# ifndef _LIBSYCL_EXPORT
+# define _LIBSYCL_EXPORT
+# define _LIBSYCL_DLL_LOCAL
+# endif
+
+#endif // __SYCL_DEVICE_ONLY__
+
+#endif // _LIBSYCL___IMPL_DETAIL_CONFIG_HPP
diff --git a/libsycl/include/sycl/__impl/platform.hpp b/libsycl/include/sycl/__impl/platform.hpp
new file mode 100644
index 0000000..bac59ac
--- /dev/null
+++ b/libsycl/include/sycl/__impl/platform.hpp
@@ -0,0 +1,31 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the SYCL platform class, which
+/// encapsulates a single platform on which kernel functions may be executed.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBSYCL___IMPL_PLATFORM_HPP
+#define _LIBSYCL___IMPL_PLATFORM_HPP
+
+#include <sycl/__impl/detail/config.hpp>
+
+_LIBSYCL_BEGIN_NAMESPACE_SYCL
+
+class _LIBSYCL_EXPORT platform {
+public:
+ /// Constructs a SYCL platform which contains the default device.
+ platform();
+
+}; // class platform
+
+_LIBSYCL_END_NAMESPACE_SYCL
+
+#endif // _LIBSYCL___IMPL_PLATFORM_HPP
diff --git a/libsycl/include/sycl/sycl.hpp b/libsycl/include/sycl/sycl.hpp
new file mode 100644
index 0000000..76399eba
--- /dev/null
+++ b/libsycl/include/sycl/sycl.hpp
@@ -0,0 +1,19 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a SYCL 2020 standard header file.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBSYCL_SYCL_HPP
+#define _LIBSYCL_SYCL_HPP
+
+#include <sycl/__impl/platform.hpp>
+
+#endif // _LIBSYCL_SYCL_HPP
diff --git a/libsycl/src/CMakeLists.txt b/libsycl/src/CMakeLists.txt
new file mode 100644
index 0000000..206b856
--- /dev/null
+++ b/libsycl/src/CMakeLists.txt
@@ -0,0 +1,98 @@
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../runtimes/cmake/Modules")
+include(WarningFlags)
+
+function(add_sycl_rt_library LIB_TARGET_NAME LIB_OBJ_NAME LIB_OUTPUT_NAME)
+ if (NOT LLVM_ENABLE_PIC)
+    message(FATAL_ERROR "Position-Independent Code generation is required for libsycl shared library")
+ endif()
+
+ cmake_parse_arguments(ARG "" "" "COMPILE_OPTIONS;SOURCES" ${ARGN})
+
+ add_library(${LIB_OBJ_NAME} OBJECT ${ARG_SOURCES})
+
+ # Common compilation step setup
+ target_compile_definitions(${LIB_OBJ_NAME} PRIVATE
+ $<$<STREQUAL:${CMAKE_SYSTEM_NAME},Windows>:_LIBSYCL_BUILDING_LIBRARY>)
+ cxx_add_warning_flags(${LIB_OBJ_NAME} ${LIBSYCL_ENABLE_WERROR} ${LIBSYCL_ENABLE_PEDANTIC})
+
+ target_include_directories(
+ ${LIB_OBJ_NAME}
+ PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${LIBSYCL_BUILD_INCLUDE_DIR}
+ )
+
+ add_library(${LIB_TARGET_NAME} SHARED
+ $<TARGET_OBJECTS:${LIB_OBJ_NAME}>)
+
+ add_dependencies(${LIB_OBJ_NAME}
+ sycl-headers
+ )
+
+ set_target_properties(${LIB_TARGET_NAME} PROPERTIES LINKER_LANGUAGE CXX)
+
+ if (CMAKE_SYSTEM_NAME STREQUAL Windows)
+ # Install stripped PDB
+ set(PDB_FILENAME "${LIB_TARGET_NAME}$<$<CONFIG:Debug>:d>")
+ check_linker_flag(CXX "LINKER:/PDBSTRIPPED:${PDB_FILENAME}.stripped.pdb"
+ LINKER_SUPPORTS_PDBSTRIPPED)
+ if(LINKER_SUPPORTS_PDBSTRIPPED)
+ target_link_options(${LIB_TARGET_NAME}
+ PRIVATE "LINKER:/PDBSTRIPPED:${PDB_FILENAME}.stripped.pdb")
+ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PDB_FILENAME}.stripped.pdb"
+ DESTINATION ${CMAKE_INSTALL_PREFIX}/bin
+ RENAME "${PDB_FILENAME}.pdb"
+ COMPONENT ${LIB_TARGET_NAME}
+ OPTIONAL)
+ endif()
+
+ target_compile_options(${LIB_OBJ_NAME} PUBLIC /EHsc)
+ else()
+ target_compile_options(${LIB_OBJ_NAME} PUBLIC
+ -fvisibility=hidden -fvisibility-inlines-hidden)
+
+ if (UNIX AND NOT APPLE)
+ set(linker_script "${CMAKE_CURRENT_SOURCE_DIR}/ld-version-script.txt")
+ target_link_libraries(
+ ${LIB_TARGET_NAME} PRIVATE "-Wl,--version-script=${linker_script}")
+ set_target_properties(${LIB_TARGET_NAME} PROPERTIES LINK_DEPENDS ${linker_script})
+ endif()
+ endif()
+
+ find_package(Threads REQUIRED)
+
+ target_link_libraries(${LIB_TARGET_NAME}
+ PRIVATE
+ ${CMAKE_DL_LIBS}
+ ${CMAKE_THREAD_LIBS_INIT}
+ )
+
+ set_target_properties(${LIB_TARGET_NAME} PROPERTIES
+ VERSION ${LIBSYCL_VERSION_STRING}
+ SOVERSION ${LIBSYCL_VERSION_STRING})
+ set_target_properties(${LIB_TARGET_NAME} PROPERTIES OUTPUT_NAME ${LIB_OUTPUT_NAME})
+endfunction(add_sycl_rt_library)
+
+set(LIBSYCL_SOURCES
+ "platform.cpp"
+)
+
+set(LIB_NAME "sycl")
+set(LIB_OUTPUT_NAME "${LIB_NAME}")
+if (CMAKE_SYSTEM_NAME STREQUAL Windows)
+ if (CMAKE_MSVC_RUNTIME_LIBRARY AND (NOT CMAKE_MSVC_RUNTIME_LIBRARY MATCHES "DLL$"))
+ message(FATAL_ERROR "libsycl requires a DLL version of the MSVC CRT.")
+ endif()
+ if ((NOT CMAKE_MSVC_RUNTIME_LIBRARY AND uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG")
+ OR (CMAKE_MSVC_RUNTIME_LIBRARY STREQUAL "MultiThreadedDebugDLL"))
+ set(LIB_OUTPUT_NAME "${LIB_OUTPUT_NAME}d")
+ endif()
+endif()
+
+add_sycl_rt_library(${LIB_NAME} sycl_object ${LIB_OUTPUT_NAME}
+ SOURCES ${LIBSYCL_SOURCES})
+
+install(TARGETS ${LIBSYCL_RT_LIBS}
+ ARCHIVE DESTINATION "lib${LLVM_LIBDIR_SUFFIX}" COMPONENT libsycl
+ LIBRARY DESTINATION "lib${LLVM_LIBDIR_SUFFIX}" COMPONENT libsycl
+ RUNTIME DESTINATION "bin" COMPONENT libsycl)
diff --git a/libsycl/src/ld-version-script.txt b/libsycl/src/ld-version-script.txt
new file mode 100644
index 0000000..a347d20
--- /dev/null
+++ b/libsycl/src/ld-version-script.txt
@@ -0,0 +1,23 @@
+{
+ /* Symbols to be exported are selected based on mangled names rather than */
+ /* the demangled names provided by the `extern "C++"` matcher because it is */
+ /* easy to express "export everything defined in the sycl namespace" using */
+ /* the former. Matching demangled names is more complicated in the presence */
+ /* of examples like: */
+ /* "vtable for sycl::foo" (should be exported) */
+ /* "vtable for std::__internal<sycl::foo>" (should not be exported) */
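+  /* For instance, assuming an ABI namespace of V1, the constructor */
+  /* sycl::V1::platform::platform() mangles to _ZN4sycl2V18platformC1Ev, */
+  /* which the _ZN4sycl* pattern below matches. */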
+
+ global:
+ /* Export everything from sycl namespace */
+ _ZNK4sycl*; /* function */
+ _ZN4sycl*; /* function */
+ _ZTIN4sycl*; /* typeinfo */
+ _ZTSN4sycl*; /* typeinfo name */
+ _ZTVN4sycl*; /* vtable */
+
+ local:
+ *;
+};
diff --git a/libsycl/src/platform.cpp b/libsycl/src/platform.cpp
new file mode 100644
index 0000000..b5d6517
--- /dev/null
+++ b/libsycl/src/platform.cpp
@@ -0,0 +1,17 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <sycl/__impl/platform.hpp>
+
+#include <stdexcept>
+
+_LIBSYCL_BEGIN_NAMESPACE_SYCL
+
+platform::platform() { throw std::runtime_error("Unimplemented"); }
+
+_LIBSYCL_END_NAMESPACE_SYCL
diff --git a/libsycl/src/version.hpp.in b/libsycl/src/version.hpp.in
new file mode 100644
index 0000000..a721576
--- /dev/null
+++ b/libsycl/src/version.hpp.in
@@ -0,0 +1,16 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declarations of the SYCL RT version macros.
+///
+//===----------------------------------------------------------------------===//
+#define _LIBSYCL_MAJOR_VERSION ${LIBSYCL_MAJOR_VERSION}
+#define _LIBSYCL_MINOR_VERSION ${LIBSYCL_MINOR_VERSION}
+#define _LIBSYCL_PATCH_VERSION ${LIBSYCL_PATCH_VERSION}
+#define _LIBSYCL_ABI_NAMESPACE ${LIBSYCL_ABI_NAMESPACE}
diff --git a/lld/COFF/Chunks.cpp b/lld/COFF/Chunks.cpp
index 01752cd..39fc250 100644
--- a/lld/COFF/Chunks.cpp
+++ b/lld/COFF/Chunks.cpp
@@ -422,12 +422,6 @@ void SectionChunk::writeTo(uint8_t *buf) const {
applyRelocation(buf + rel.VirtualAddress, rel);
}
-
- // Write the offset to EC entry thunk preceding section contents. The low bit
- // is always set, so it's effectively an offset from the last byte of the
- // offset.
- if (Defined *entryThunk = getEntryThunk())
- write32le(buf - sizeof(uint32_t), entryThunk->getRVA() - rva + 1);
}
void SectionChunk::applyRelocation(uint8_t *off,
@@ -881,6 +875,19 @@ void RangeExtensionThunkARM64::writeTo(uint8_t *buf) const {
applyArm64Imm(buf + 4, target->getRVA() & 0xfff, 0);
}
+void SameAddressThunkARM64EC::setDynamicRelocs(COFFLinkerContext &ctx) const {
+ // Add ARM64X relocations replacing adrp/add instructions with a version using
+ // the hybrid target.
+ RangeExtensionThunkARM64 hybridView(ARM64EC, hybridTarget);
+ uint8_t buf[sizeof(arm64Thunk)];
+ hybridView.setRVA(rva);
+ hybridView.writeTo(buf);
+  uint32_t adrp = *reinterpret_cast<ulittle32_t *>(buf);
+  uint32_t add = *reinterpret_cast<ulittle32_t *>(buf + sizeof(uint32_t));
+  ctx.dynamicRelocs->set(this, adrp);
+  ctx.dynamicRelocs->set(Arm64XRelocVal(this, sizeof(uint32_t)), add);
+}
+
LocalImportChunk::LocalImportChunk(COFFLinkerContext &c, Defined *s)
: sym(s), ctx(c) {
setAlignment(ctx.config.wordsize);
@@ -1264,7 +1271,8 @@ void DynamicRelocsChunk::finalize() {
}
// Set the reloc value. The reloc entry must be allocated beforehand.
-void DynamicRelocsChunk::set(uint32_t rva, Arm64XRelocVal value) {
+void DynamicRelocsChunk::set(Arm64XRelocVal offset, Arm64XRelocVal value) {
+ uint32_t rva = offset.get();
auto entry =
llvm::find_if(arm64xRelocs, [rva](const Arm64XDynamicRelocEntry &e) {
return e.offset.get() == rva;
diff --git a/lld/COFF/Chunks.h b/lld/COFF/Chunks.h
index d03a64c..6d88f5e 100644
--- a/lld/COFF/Chunks.h
+++ b/lld/COFF/Chunks.h
@@ -193,6 +193,8 @@ public:
// allowed ranges. Return the additional space required for the extension.
virtual uint32_t extendRanges() { return 0; };
+ virtual Defined *getEntryThunk() const { return nullptr; };
+
static bool classof(const Chunk *c) { return c->kind() >= OtherKind; }
protected:
@@ -633,7 +635,7 @@ public:
bool verifyRanges() override;
uint32_t extendRanges() override;
- Defined *exitThunk;
+ Defined *exitThunk = nullptr;
Defined *sym = nullptr;
bool extended = false;
@@ -675,6 +677,26 @@ private:
MachineTypes machine;
};
+// A chunk used to guarantee the same address for a function in both views of
+// a hybrid image. Similar to RangeExtensionThunkARM64 chunks, it calls the
+// target symbol using a BR instruction. It also contains an entry thunk for EC
+// compatibility and additional ARM64X relocations that swap targets between
+// views.
+class SameAddressThunkARM64EC : public RangeExtensionThunkARM64 {
+public:
+ explicit SameAddressThunkARM64EC(Defined *t, Defined *hybridTarget,
+ Defined *entryThunk)
+ : RangeExtensionThunkARM64(ARM64EC, t), hybridTarget(hybridTarget),
+ entryThunk(entryThunk) {}
+
+ Defined *getEntryThunk() const override { return entryThunk; }
+ void setDynamicRelocs(COFFLinkerContext &ctx) const;
+
+private:
+ Defined *hybridTarget;
+ Defined *entryThunk;
+};
+
// Windows-specific.
// See comments for DefinedLocalImport class.
class LocalImportChunk : public NonSectionChunk {
@@ -843,13 +865,13 @@ class Arm64XRelocVal {
public:
Arm64XRelocVal(uint64_t value = 0) : value(value) {}
Arm64XRelocVal(Defined *sym, int32_t offset = 0) : sym(sym), value(offset) {}
- Arm64XRelocVal(Chunk *chunk, int32_t offset = 0)
+ Arm64XRelocVal(const Chunk *chunk, int32_t offset = 0)
: chunk(chunk), value(offset) {}
uint64_t get() const;
private:
Defined *sym = nullptr;
- Chunk *chunk = nullptr;
+ const Chunk *chunk = nullptr;
uint64_t value;
};
@@ -884,7 +906,7 @@ public:
arm64xRelocs.emplace_back(type, size, offset, value);
}
- void set(uint32_t rva, Arm64XRelocVal value);
+ void set(Arm64XRelocVal offset, Arm64XRelocVal value);
private:
std::vector<Arm64XDynamicRelocEntry> arm64xRelocs;
@@ -940,6 +962,8 @@ inline bool Chunk::isHotPatchable() const {
inline Defined *Chunk::getEntryThunk() const {
if (auto *c = dyn_cast<const SectionChunkEC>(this))
return c->entryThunk;
+ if (auto *c = dyn_cast<const NonSectionChunk>(this))
+ return c->getEntryThunk();
return nullptr;
}
diff --git a/lld/COFF/Config.h b/lld/COFF/Config.h
index 91b6e63..a03bb57 100644
--- a/lld/COFF/Config.h
+++ b/lld/COFF/Config.h
@@ -223,6 +223,9 @@ struct Configuration {
StringRef manifestUIAccess = "'false'";
StringRef manifestFile;
+ // used for /arm64xsameaddress
+ std::vector<std::pair<Symbol *, Symbol *>> sameAddresses;
+
// used for /dwodir
StringRef dwoDir;
diff --git a/lld/COFF/Driver.cpp b/lld/COFF/Driver.cpp
index 570b8f9..7580b469 100644
--- a/lld/COFF/Driver.cpp
+++ b/lld/COFF/Driver.cpp
@@ -500,7 +500,9 @@ void LinkerDriver::parseDirectives(InputFile *file) {
file->symtab.parseAlternateName(arg->getValue());
break;
case OPT_arm64xsameaddress:
- if (!file->symtab.isEC())
+ if (file->symtab.isEC())
+ parseSameAddress(arg->getValue());
+ else
Warn(ctx) << arg->getSpelling()
<< " is not allowed in non-ARM64EC files (" << toString(file)
<< ")";
@@ -1318,13 +1320,9 @@ void LinkerDriver::convertResources() {
}
void LinkerDriver::maybeCreateECExportThunk(StringRef name, Symbol *&sym) {
- Defined *def;
if (!sym)
return;
- if (auto undef = dyn_cast<Undefined>(sym))
- def = undef->getDefinedWeakAlias();
- else
- def = dyn_cast<Defined>(sym);
+ Defined *def = sym->getDefined();
if (!def)
return;
@@ -1356,11 +1354,7 @@ void LinkerDriver::createECExportThunks() {
Symbol *sym = ctx.symtab.find(targetName);
if (!sym)
continue;
- Defined *targetSym;
- if (auto undef = dyn_cast<Undefined>(sym))
- targetSym = undef->getDefinedWeakAlias();
- else
- targetSym = dyn_cast<Defined>(sym);
+ Defined *targetSym = sym->getDefined();
if (!targetSym)
continue;
@@ -2303,6 +2297,13 @@ void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
args.filtered(OPT_dependentloadflag, OPT_dependentloadflag_opt))
parseDependentLoadFlags(arg);
+ for (auto *arg : args.filtered(OPT_arm64xsameaddress)) {
+ if (ctx.hybridSymtab)
+ parseSameAddress(arg->getValue());
+ else
+ Warn(ctx) << arg->getSpelling() << " is allowed only on EC targets";
+ }
+
if (tar) {
llvm::TimeTraceScope timeScope("Reproducer: response file");
tar->append(
@@ -2676,12 +2677,46 @@ void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
createECExportThunks();
// Resolve remaining undefined symbols and warn about imported locals.
+ std::vector<Undefined *> aliases;
ctx.forEachSymtab(
- [&](SymbolTable &symtab) { symtab.resolveRemainingUndefines(); });
+ [&](SymbolTable &symtab) { symtab.resolveRemainingUndefines(aliases); });
if (errorCount())
return;
+ ctx.forEachActiveSymtab([](SymbolTable &symtab) {
+ symtab.initializeECThunks();
+ symtab.initializeLoadConfig();
+ });
+
+ // Identify unreferenced COMDAT sections.
+ if (config->doGC) {
+ if (config->mingw) {
+ // markLive doesn't traverse .eh_frame, but the personality function is
+ // only reached that way. The proper solution would be to parse and
+ // traverse the .eh_frame section, like the ELF linker does.
+ // For now, just manually try to retain the known possible personality
+ // functions. This doesn't bring in more object files, but only marks
+ // functions that already have been included to be retained.
+ ctx.forEachSymtab([&](SymbolTable &symtab) {
+ for (const char *n : {"__gxx_personality_v0", "__gcc_personality_v0",
+ "rust_eh_personality"}) {
+ Defined *d = dyn_cast_or_null<Defined>(symtab.findUnderscore(n));
+ if (d && !d->isGCRoot) {
+ d->isGCRoot = true;
+ config->gcroot.push_back(d);
+ }
+ }
+ });
+ }
+
+ markLive(ctx);
+ }
+
+ ctx.symtab.initializeSameAddressThunks();
+ for (auto alias : aliases)
+ alias->resolveWeakAlias();
+
if (config->mingw) {
// Make sure the crtend.o object is the last object file. This object
// file can contain terminating section chunks that need to be placed
@@ -2773,35 +2808,6 @@ void LinkerDriver::linkerMain(ArrayRef<const char *> argsArr) {
if (auto *arg = args.getLastArg(OPT_print_symbol_order))
config->printSymbolOrder = arg->getValue();
- if (ctx.symtab.isEC())
- ctx.symtab.initializeECThunks();
- ctx.forEachActiveSymtab(
- [](SymbolTable &symtab) { symtab.initializeLoadConfig(); });
-
- // Identify unreferenced COMDAT sections.
- if (config->doGC) {
- if (config->mingw) {
- // markLive doesn't traverse .eh_frame, but the personality function is
- // only reached that way. The proper solution would be to parse and
- // traverse the .eh_frame section, like the ELF linker does.
- // For now, just manually try to retain the known possible personality
- // functions. This doesn't bring in more object files, but only marks
- // functions that already have been included to be retained.
- ctx.forEachSymtab([&](SymbolTable &symtab) {
- for (const char *n : {"__gxx_personality_v0", "__gcc_personality_v0",
- "rust_eh_personality"}) {
- Defined *d = dyn_cast_or_null<Defined>(symtab.findUnderscore(n));
- if (d && !d->isGCRoot) {
- d->isGCRoot = true;
- config->gcroot.push_back(d);
- }
- }
- });
- }
-
- markLive(ctx);
- }
-
// Needs to happen after the last call to addFile().
convertResources();
diff --git a/lld/COFF/Driver.h b/lld/COFF/Driver.h
index 5a9bd5c..b500ac8 100644
--- a/lld/COFF/Driver.h
+++ b/lld/COFF/Driver.h
@@ -214,6 +214,8 @@ private:
void parsePDBPageSize(StringRef);
void parseSection(StringRef);
+ void parseSameAddress(StringRef);
+
// Parses a MS-DOS stub file
void parseDosStub(StringRef path);
diff --git a/lld/COFF/DriverUtils.cpp b/lld/COFF/DriverUtils.cpp
index d8b41c7..dc4039f 100644
--- a/lld/COFF/DriverUtils.cpp
+++ b/lld/COFF/DriverUtils.cpp
@@ -328,6 +328,22 @@ void LinkerDriver::parseSwaprun(StringRef arg) {
} while (!arg.empty());
}
+void LinkerDriver::parseSameAddress(StringRef arg) {
+ auto mangledName = getArm64ECMangledFunctionName(arg);
+ Symbol *sym = ctx.symtab.addUndefined(mangledName ? *mangledName : arg);
+
+ // MSVC appears to generate thunks even for non-hybrid ARM64EC images.
+ // As a side effect, the native symbol is pulled in. Since this is used
+ // in the CRT for thread-local constructors, it results in the image
+ // containing unnecessary native code. As these thunks don't appear to
+ // be useful, we limit this behavior to actual hybrid targets. This may
+ // change if compatibility becomes necessary.
+ if (ctx.config.machine != ARM64X)
+ return;
+ Symbol *nativeSym = ctx.hybridSymtab->addUndefined(arg);
+ ctx.config.sameAddresses.emplace_back(sym, nativeSym);
+}
+
// An RAII temporary file class that automatically removes a temporary file.
namespace {
class TemporaryFile {
diff --git a/lld/COFF/MarkLive.cpp b/lld/COFF/MarkLive.cpp
index f40810c..78f5030 100644
--- a/lld/COFF/MarkLive.cpp
+++ b/lld/COFF/MarkLive.cpp
@@ -49,7 +49,10 @@ void markLive(COFFLinkerContext &ctx) {
addSym(file->impchkThunk->exitThunk);
};
- addSym = [&](Symbol *b) {
+ addSym = [&](Symbol *s) {
+ Defined *b = s->getDefined();
+ if (!b)
+ return;
if (auto *sym = dyn_cast<DefinedRegular>(b)) {
enqueue(sym->getChunk());
} else if (auto *sym = dyn_cast<DefinedImportData>(b)) {
diff --git a/lld/COFF/Options.td b/lld/COFF/Options.td
index 0d66b49..2c393cc 100644
--- a/lld/COFF/Options.td
+++ b/lld/COFF/Options.td
@@ -31,6 +31,9 @@ multiclass B_priv<string name> {
def align : P<"align", "Section alignment">;
def aligncomm : P<"aligncomm", "Set common symbol alignment">;
def alternatename : P<"alternatename", "Define weak alias">;
+def arm64xsameaddress
+ : P<"arm64xsameaddress", "Generate a thunk for the symbol with the same "
+                             "address in both native and EC views on ARM64X">;
def base : P<"base", "Base address of the program">;
def color_diagnostics: Flag<["--"], "color-diagnostics">,
HelpText<"Alias for --color-diagnostics=always">;
@@ -373,4 +376,3 @@ def tlbid : P_priv<"tlbid">;
def tlbout : P_priv<"tlbout">;
def verbose_all : P_priv<"verbose">;
def guardsym : P_priv<"guardsym">;
-def arm64xsameaddress : P_priv<"arm64xsameaddress">;
diff --git a/lld/COFF/SymbolTable.cpp b/lld/COFF/SymbolTable.cpp
index 189e75d..de04cdf 100644
--- a/lld/COFF/SymbolTable.cpp
+++ b/lld/COFF/SymbolTable.cpp
@@ -452,7 +452,7 @@ void SymbolTable::reportUnresolvable() {
reportProblemSymbols(undefs, /*localImports=*/nullptr, true);
}
-void SymbolTable::resolveRemainingUndefines() {
+void SymbolTable::resolveRemainingUndefines(std::vector<Undefined *> &aliases) {
llvm::TimeTraceScope timeScope("Resolve remaining undefined symbols");
SmallPtrSet<Symbol *, 8> undefs;
DenseMap<Symbol *, Symbol *> localImports;
@@ -468,8 +468,10 @@ void SymbolTable::resolveRemainingUndefines() {
StringRef name = undef->getName();
// A weak alias may have been resolved, so check for that.
- if (undef->resolveWeakAlias())
+ if (undef->getWeakAlias()) {
+ aliases.push_back(undef);
continue;
+ }
// If we can resolve a symbol by removing __imp_ prefix, do that.
// This odd rule is for compatibility with MSVC linker.
@@ -620,10 +622,10 @@ void SymbolTable::initializeECThunks() {
return;
for (auto it : entryThunks) {
- auto *to = dyn_cast<Defined>(it.second);
+ Defined *to = it.second->getDefined();
if (!to)
continue;
- auto *from = dyn_cast<DefinedRegular>(it.first);
+ auto *from = dyn_cast_or_null<DefinedRegular>(it.first->getDefined());
// We need to be able to add padding to the function and fill it with an
// offset to its entry thunks. To ensure that padding the function is
// feasible, functions are required to be COMDAT symbols with no offset.
@@ -642,7 +644,8 @@ void SymbolTable::initializeECThunks() {
Symbol *sym = exitThunks.lookup(file->thunkSym);
if (!sym)
sym = exitThunks.lookup(file->impECSym);
- file->impchkThunk->exitThunk = dyn_cast_or_null<Defined>(sym);
+ if (sym)
+ file->impchkThunk->exitThunk = sym->getDefined();
}
// On ARM64EC, the __imp_ symbol references the auxiliary IAT, while the
@@ -659,6 +662,35 @@ void SymbolTable::initializeECThunks() {
});
}
+void SymbolTable::initializeSameAddressThunks() {
+ for (auto iter : ctx.config.sameAddresses) {
+ auto sym = dyn_cast_or_null<DefinedRegular>(iter.first->getDefined());
+ if (!sym || !sym->isLive())
+ continue;
+ auto nativeSym =
+ dyn_cast_or_null<DefinedRegular>(iter.second->getDefined());
+ if (!nativeSym || !nativeSym->isLive())
+ continue;
+ Defined *entryThunk = sym->getChunk()->getEntryThunk();
+ if (!entryThunk)
+ continue;
+
+    // Replace the symbols with references to the thunk. Store the original
+    // symbols as equivalent DefinedSynthetic instances for use in the thunk
+    // itself.
+ auto symClone = make<DefinedSynthetic>(sym->getName(), sym->getChunk(),
+ sym->getValue());
+ auto nativeSymClone = make<DefinedSynthetic>(
+ nativeSym->getName(), nativeSym->getChunk(), nativeSym->getValue());
+ SameAddressThunkARM64EC *thunk =
+ make<SameAddressThunkARM64EC>(nativeSymClone, symClone, entryThunk);
+ sameAddressThunks.push_back(thunk);
+
+ replaceSymbol<DefinedSynthetic>(sym, sym->getName(), thunk);
+ replaceSymbol<DefinedSynthetic>(nativeSym, nativeSym->getName(), thunk);
+ }
+}
+
Symbol *SymbolTable::addUndefined(StringRef name, InputFile *f,
bool overrideLazy) {
auto [s, wasInserted] = insert(name, f);
diff --git a/lld/COFF/SymbolTable.h b/lld/COFF/SymbolTable.h
index 7eb0676..aadd366 100644
--- a/lld/COFF/SymbolTable.h
+++ b/lld/COFF/SymbolTable.h
@@ -31,6 +31,7 @@ class DefinedAbsolute;
class DefinedRegular;
class ImportThunkChunk;
class LazyArchive;
+class SameAddressThunkARM64EC;
class SectionChunk;
class Symbol;
@@ -67,7 +68,7 @@ public:
// Try to resolve any undefined symbols and update the symbol table
// accordingly, then print an error message for any remaining undefined
// symbols and warn about imported local symbols.
- void resolveRemainingUndefines();
+ void resolveRemainingUndefines(std::vector<Undefined *> &aliases);
// Try to resolve undefined symbols with alternate names.
void resolveAlternateNames();
@@ -140,6 +141,7 @@ public:
void addEntryThunk(Symbol *from, Symbol *to);
void addExitThunk(Symbol *from, Symbol *to);
void initializeECThunks();
+ void initializeSameAddressThunks();
void reportDuplicate(Symbol *existing, InputFile *newFile,
SectionChunk *newSc = nullptr,
@@ -159,6 +161,8 @@ public:
// A list of EC EXP+ symbols.
std::vector<Symbol *> expSymbols;
+ std::vector<SameAddressThunkARM64EC *> sameAddressThunks;
+
// A list of DLL exports.
std::vector<Export> exports;
llvm::DenseSet<StringRef> directivesExports;
diff --git a/lld/COFF/Symbols.cpp b/lld/COFF/Symbols.cpp
index b571ce9..ba4f95d1 100644
--- a/lld/COFF/Symbols.cpp
+++ b/lld/COFF/Symbols.cpp
@@ -91,6 +91,14 @@ bool Symbol::isLive() const {
return true;
}
+Defined *Symbol::getDefined() {
+ if (auto d = dyn_cast<Defined>(this))
+ return d;
+ if (auto u = dyn_cast<Undefined>(this))
+ return u->getDefinedWeakAlias();
+ return nullptr;
+}
+
void Symbol::replaceKeepingName(Symbol *other, size_t size) {
StringRef origName = getName();
memcpy(this, other, size);
diff --git a/lld/COFF/Symbols.h b/lld/COFF/Symbols.h
index fd3d8ce..c86ded8 100644
--- a/lld/COFF/Symbols.h
+++ b/lld/COFF/Symbols.h
@@ -95,6 +95,10 @@ public:
symbolKind == LazyDLLSymbolKind;
}
+ // Get the Defined symbol associated with this symbol, either itself or its
+ // weak alias.
+ Defined *getDefined();
+
private:
void computeName();
diff --git a/lld/COFF/Writer.cpp b/lld/COFF/Writer.cpp
index 0765618..21ab9d1 100644
--- a/lld/COFF/Writer.cpp
+++ b/lld/COFF/Writer.cpp
@@ -314,6 +314,7 @@ private:
uint32_t dataDirOffset64;
OutputSection *textSec;
+ OutputSection *wowthkSec;
OutputSection *hexpthkSec;
OutputSection *bssSec;
OutputSection *rdataSec;
@@ -1076,8 +1077,10 @@ void Writer::createSections() {
// Try to match the section order used by link.exe.
textSec = createSection(".text", code | r | x);
- if (isArm64EC(ctx.config.machine))
+ if (isArm64EC(ctx.config.machine)) {
+ wowthkSec = createSection(".wowthk", code | r | x);
hexpthkSec = createSection(".hexpthk", code | r | x);
+ }
bssSec = createSection(".bss", bss | r | w);
rdataSec = createSection(".rdata", data | r);
buildidSec = createSection(".buildid", data | r);
@@ -1129,6 +1132,9 @@ void Writer::createSections() {
if (hasIdata)
locateImportTables();
+ for (auto thunk : ctx.symtab.sameAddressThunks)
+ wowthkSec->addChunk(thunk);
+
// Then create an OutputSection for each section.
// '$' and all following characters in input section names are
// discarded when determining output section. So, .text$foo
@@ -2310,6 +2316,14 @@ void Writer::createECChunks() {
ctx.symtab.findUnderscore("__arm64x_redirection_metadata");
replaceSymbol<DefinedSynthetic>(entryPointsSym, entryPointsSym->getName(),
entryPoints);
+
+ for (auto thunk : ctx.symtab.sameAddressThunks) {
+ // Relocation values are set later in setECSymbols.
+ ctx.dynamicRelocs->add(IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, sizeof(uint32_t),
+ thunk);
+ ctx.dynamicRelocs->add(IMAGE_DVRT_ARM64X_FIXUP_TYPE_VALUE, sizeof(uint32_t),
+ Arm64XRelocVal(thunk, sizeof(uint32_t)));
+ }
}
// MinGW specific. Gather all relocations that are imported from a DLL even
@@ -2519,6 +2533,9 @@ void Writer::setECSymbols() {
chpeSym->getRVA() + offsetof(chpe_metadata, ExtraRFETableSize),
pdata.last->getRVA() + pdata.last->getSize() - pdata.first->getRVA());
}
+
+ for (SameAddressThunkARM64EC *thunk : ctx.symtab.sameAddressThunks)
+ thunk->setDynamicRelocs(ctx);
}
// Write section contents to a mmap'ed file.
@@ -2544,7 +2561,15 @@ void Writer::writeSections() {
}
parallelForEach(sec->chunks, [&](Chunk *c) {
- c->writeTo(secBuf + c->getRVA() - sec->getRVA());
+ uint8_t *buf = secBuf + c->getRVA() - sec->getRVA();
+ c->writeTo(buf);
+
+ // Write the offset to EC entry thunk preceding section contents. The low
+ // bit is always set, so it's effectively an offset from the last byte of
+ // the offset.
+ if (Defined *entryThunk = c->getEntryThunk())
+ write32le(buf - sizeof(uint32_t),
+ entryThunk->getRVA() - c->getRVA() + 1);
});
}
}
diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index a145530..8802c8c 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -46,6 +46,8 @@ public:
private:
void tlsdescToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
void tlsdescToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const;
+ bool tryGotToPCRel(uint8_t *loc, const Relocation &rHi20,
+ const Relocation &rLo12, uint64_t secAddr) const;
};
} // end anonymous namespace
@@ -1155,6 +1157,78 @@ void LoongArch::tlsdescToLe(uint8_t *loc, const Relocation &rel,
}
}
+// Try GOT indirection to PC relative optimization.
+// From:
+// * pcalau12i $a0, %got_pc_hi20(sym_got)
+// * ld.w/d $a0, $a0, %got_pc_lo12(sym_got)
+// To:
+// * pcalau12i $a0, %pc_hi20(sym)
+// * addi.w/d $a0, $a0, %pc_lo12(sym)
+//
+// Note: Although the optimization has been performed, the GOT entries still
+// exist, similarly to AArch64. Eliminating the entries would increase code
+// complexity.
+bool LoongArch::tryGotToPCRel(uint8_t *loc, const Relocation &rHi20,
+ const Relocation &rLo12, uint64_t secAddr) const {
+ // Check if the relocations apply to consecutive instructions.
+ if (rHi20.offset + 4 != rLo12.offset)
+ return false;
+
+ // Check if the relocations reference the same symbol and skip undefined,
+ // preemptible and STT_GNU_IFUNC symbols.
+ if (!rHi20.sym || rHi20.sym != rLo12.sym || !rHi20.sym->isDefined() ||
+ rHi20.sym->isPreemptible || rHi20.sym->isGnuIFunc())
+ return false;
+
+ // GOT references to absolute symbols can't be relaxed to use PCALAU12I/ADDI
+ // in position-independent code because these instructions produce a relative
+ // address.
+  if (ctx.arg.isPic && !cast<Defined>(*rHi20.sym).section)
+ return false;
+
+ // Check if the addends of the both relocations are zero.
+  // Check that the addends of both relocations are zero.
+ return false;
+
+ const uint32_t currInsn = read32le(loc);
+ const uint32_t nextInsn = read32le(loc + 4);
+ const uint32_t ldOpcode = ctx.arg.is64 ? LD_D : LD_W;
+ // Check if the first instruction is PCALAU12I and the second instruction is
+ // LD.
+ if ((currInsn & 0xfe000000) != PCALAU12I ||
+ (nextInsn & 0xffc00000) != ldOpcode)
+ return false;
+
+  // Check that both instructions use the same register.
+ if (getD5(currInsn) != getJ5(nextInsn) || getJ5(nextInsn) != getD5(nextInsn))
+ return false;
+
+ Symbol &sym = *rHi20.sym;
+ uint64_t symLocal = sym.getVA(ctx);
+ const int64_t displace = symLocal - getLoongArchPage(secAddr + rHi20.offset);
+ // Check if the symbol address is in
+ // [(PC & ~0xfff) - 2GiB - 0x800, (PC & ~0xfff) + 2GiB - 0x800).
+ const int64_t underflow = -0x80000000LL - 0x800;
+ const int64_t overflow = 0x80000000LL - 0x800;
+ if (!(displace >= underflow && displace < overflow))
+ return false;
+
+ Relocation newRHi20 = {RE_LOONGARCH_PAGE_PC, R_LARCH_PCALA_HI20, rHi20.offset,
+ rHi20.addend, &sym};
+ Relocation newRLo12 = {R_ABS, R_LARCH_PCALA_LO12, rLo12.offset, rLo12.addend,
+ &sym};
+ uint64_t pageDelta =
+ getLoongArchPageDelta(symLocal, secAddr + rHi20.offset, rHi20.type);
+ // pcalau12i $a0, %pc_hi20
+ write32le(loc, insn(PCALAU12I, getD5(currInsn), 0, 0));
+ relocate(loc, newRHi20, pageDelta);
+ // addi.w/d $a0, $a0, %pc_lo12
+ write32le(loc + 4, insn(ctx.arg.is64 ? ADDI_D : ADDI_W, getD5(nextInsn),
+ getJ5(nextInsn), 0));
+ relocate(loc + 4, newRLo12, SignExtend64(symLocal, 64));
+ return true;
+}
+
// During TLSDESC GD_TO_IE, the converted code sequence always includes an
// instruction related to the Lo12 relocation (ld.[wd]). To obtain correct val
// in `getRelocTargetVA`, expr of this instruction should be adjusted to
@@ -1172,6 +1246,30 @@ RelExpr LoongArch::adjustTlsExpr(RelType type, RelExpr expr) const {
return expr;
}
+static bool pairForGotRels(ArrayRef<Relocation> relocs) {
+ // Check if R_LARCH_GOT_PC_HI20 and R_LARCH_GOT_PC_LO12 always appear in
+ // pairs.
+ size_t i = 0;
+ const size_t size = relocs.size();
+ for (; i != size; ++i) {
+ if (relocs[i].type == R_LARCH_GOT_PC_HI20) {
+ if (i + 1 < size && relocs[i + 1].type == R_LARCH_GOT_PC_LO12) {
+ ++i;
+ continue;
+ }
+ if (relaxable(relocs, i) && i + 2 < size &&
+ relocs[i + 2].type == R_LARCH_GOT_PC_LO12) {
+ i += 2;
+ continue;
+ }
+ break;
+ } else if (relocs[i].type == R_LARCH_GOT_PC_LO12) {
+ break;
+ }
+ }
+ return i == size;
+}
+
void LoongArch::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
const unsigned bits = ctx.arg.is64 ? 64 : 32;
uint64_t secAddr = sec.getOutputSection()->addr;
@@ -1181,6 +1279,7 @@ void LoongArch::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
secAddr += ehIn->getParent()->outSecOff;
bool isExtreme = false, isRelax = false;
const MutableArrayRef<Relocation> relocs = sec.relocs();
+ const bool isPairForGotRels = pairForGotRels(relocs);
for (size_t i = 0, size = relocs.size(); i != size; ++i) {
Relocation &rel = relocs[i];
uint8_t *loc = buf + rel.offset;
@@ -1264,6 +1363,24 @@ void LoongArch::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
tlsdescToLe(loc, rel, val);
}
continue;
+ case RE_LOONGARCH_GOT_PAGE_PC:
+      // On LoongArch, we try the GOT-indirection-to-PC-relative optimization
+      // in the normal or medium code model, with or without the R_LARCH_RELAX
+      // relocation. Moreover, if the original code sequence can be relaxed to
+      // a single pcaddi instruction, the first instruction is removed and
+      // control never reaches here.
+ if (isPairForGotRels && rel.type == R_LARCH_GOT_PC_HI20) {
+ bool isRelax = relaxable(relocs, i);
+ const Relocation lo12Rel = isRelax ? relocs[i + 2] : relocs[i + 1];
+ if (lo12Rel.type == R_LARCH_GOT_PC_LO12 &&
+ tryGotToPCRel(loc, rel, lo12Rel, secAddr)) {
+ // isRelax: skip relocations R_LARCH_RELAX, R_LARCH_GOT_PC_LO12
+ // !isRelax: skip relocation R_LARCH_GOT_PC_LO12
+ i += isRelax ? 2 : 1;
+ continue;
+ }
+ }
+ break;
default:
break;
}
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index 37e4c8a..a5921fe 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -20,6 +20,7 @@
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/LTO/LTO.h"
+#include "llvm/Object/Archive.h"
#include "llvm/Object/IRObjectFile.h"
#include "llvm/Support/AArch64AttributeParser.h"
#include "llvm/Support/ARMAttributeParser.h"
@@ -1811,6 +1812,39 @@ static uint8_t getOsAbi(const Triple &t) {
}
}
+// For DTLTO, bitcode member names must be valid paths to files on disk.
+// For thin archives, resolve `memberPath` relative to the archive's location.
+// Returns true if adjusted; false otherwise. Non-thin archives are unsupported.
+static bool dtltoAdjustMemberPathIfThinArchive(Ctx &ctx, StringRef archivePath,
+ std::string &memberPath) {
+ assert(!archivePath.empty());
+
+ if (ctx.arg.dtltoDistributor.empty())
+ return false;
+
+ // Read the archive header to determine if it's a thin archive.
+ auto bufferOrErr =
+ MemoryBuffer::getFileSlice(archivePath, sizeof(ThinArchiveMagic) - 1, 0);
+ if (std::error_code ec = bufferOrErr.getError()) {
+ ErrAlways(ctx) << "cannot open " << archivePath << ": " << ec.message();
+ return false;
+ }
+
+ if (!bufferOrErr->get()->getBuffer().starts_with(ThinArchiveMagic))
+ return false;
+
+ SmallString<128> resolvedPath;
+ if (path::is_relative(memberPath)) {
+ resolvedPath = path::parent_path(archivePath);
+ path::append(resolvedPath, memberPath);
+ } else
+ resolvedPath = memberPath;
+
+ path::remove_dots(resolvedPath, /*remove_dot_dot=*/true);
+ memberPath = resolvedPath.str();
+ return true;
+}
+
BitcodeFile::BitcodeFile(Ctx &ctx, MemoryBufferRef mb, StringRef archiveName,
uint64_t offsetInArchive, bool lazy)
: InputFile(ctx, BitcodeKind, mb) {
@@ -1821,17 +1855,22 @@ BitcodeFile::BitcodeFile(Ctx &ctx, MemoryBufferRef mb, StringRef archiveName,
if (ctx.arg.thinLTOIndexOnly)
path = replaceThinLTOSuffix(ctx, mb.getBufferIdentifier());
- // ThinLTO assumes that all MemoryBufferRefs given to it have a unique
- // name. If two archives define two members with the same name, this
- // causes a collision which result in only one of the objects being taken
- // into consideration at LTO time (which very likely causes undefined
- // symbols later in the link stage). So we append file offset to make
- // filename unique.
StringSaver &ss = ctx.saver;
- StringRef name = archiveName.empty()
- ? ss.save(path)
- : ss.save(archiveName + "(" + path::filename(path) +
- " at " + utostr(offsetInArchive) + ")");
+ StringRef name;
+ if (archiveName.empty() ||
+ dtltoAdjustMemberPathIfThinArchive(ctx, archiveName, path)) {
+ name = ss.save(path);
+ } else {
+ // ThinLTO assumes that all MemoryBufferRefs given to it have a unique
+ // name. If two archives define two members with the same name, this
+    // causes a collision which results in only one of the objects being
+    // taken into consideration at LTO time (which very likely causes
+    // undefined symbols later in the link stage). So we append the file
+    // offset to make the filename unique.
+ name = ss.save(archiveName + "(" + path::filename(path) + " at " +
+ utostr(offsetInArchive) + ")");
+ }
+
MemoryBufferRef mbref(mb.getBuffer(), name);
obj = CHECK2(lto::InputFile::create(mbref), this);
diff --git a/lld/test/COFF/arm64x-sameaddress.test b/lld/test/COFF/arm64x-sameaddress.test
index c69be9d..819d19b 100644
--- a/lld/test/COFF/arm64x-sameaddress.test
+++ b/lld/test/COFF/arm64x-sameaddress.test
@@ -3,16 +3,103 @@ RUN: split-file %s %t.dir && cd %t.dir
RUN: llvm-mc -filetype=obj -triple=arm64ec-windows func-arm64ec.s -o func-arm64ec.obj
RUN: llvm-mc -filetype=obj -triple=aarch64-windows func-arm64.s -o func-arm64.obj
+RUN: llvm-mc -filetype=obj -triple=arm64ec-windows ref-arm64ec.s -o ref-arm64ec.obj
+RUN: llvm-mc -filetype=obj -triple=aarch64-windows ref-arm64.s -o ref-arm64.obj
RUN: llvm-mc -filetype=obj -triple=arm64ec-windows drectve.s -o drectve.obj
RUN: llvm-mc -filetype=obj -triple=aarch64-windows drectve.s -o drectve-arm64.obj
RUN: llvm-mc -filetype=obj -triple=arm64ec-windows %S/Inputs/loadconfig-arm64ec.s -o loadconfig-arm64ec.obj
RUN: llvm-mc -filetype=obj -triple=aarch64-windows %S/Inputs/loadconfig-arm64.s -o loadconfig-arm64.obj
RUN: lld-link -machine:arm64x -dll -noentry -out:out.dll loadconfig-arm64.obj loadconfig-arm64ec.obj \
-RUN: func-arm64.obj func-arm64ec.obj drectve.obj
+RUN: func-arm64.obj func-arm64ec.obj ref-arm64.obj ref-arm64ec.obj drectve.obj
+
+RUN: llvm-objdump -d out.dll | FileCheck --check-prefix=DISASM %s
+DISASM: 0000000180001000 <.text>:
+DISASM-NEXT: 180001000: d2800020 mov x0, #0x1 // =1
+DISASM-NEXT: 180001004: d65f03c0 ret
+DISASM-NEXT: ...
+DISASM-NEXT: 180002000: 00000019 udf #0x19
+DISASM-NEXT: 180002004: d2800040 mov x0, #0x2 // =2
+DISASM-NEXT: 180002008: d65f03c0 ret
+DISASM-NEXT: 18000200c: 0000000d udf #0xd
+DISASM-NEXT: 180002010: f0fffff0 adrp x16, 0x180001000 <.text>
+DISASM-NEXT: 180002014: 91000210 add x16, x16, #0x0
+DISASM-NEXT: 180002018: d61f0200 br x16
+DISASM-NEXT: 18000201c: d2800060 mov x0, #0x3 // =3
+DISASM-NEXT: 180002020: d65f03c0 ret
+
+RUN: llvm-readobj --hex-dump=.test out.dll | FileCheck --check-prefix=TESTSEC %s
+TESTSEC: 10200000 10200000 10200000
+
+RUN: llvm-readobj --coff-load-config out.dll | FileCheck --check-prefix=DYNRELOCS %s
+DYNRELOCS: DynamicRelocations [
+DYNRELOCS-NEXT: Version: 0x1
+DYNRELOCS-NEXT: Arm64X [
+DYNRELOCS-NEXT: Entry [
+DYNRELOCS-NEXT: RVA: 0x7C
+DYNRELOCS-NEXT: Type: VALUE
+DYNRELOCS-NEXT: Size: 0x2
+DYNRELOCS-NEXT: Value: 0x8664
+DYNRELOCS-NEXT: ]
+DYNRELOCS-NEXT: Entry [
+DYNRELOCS-NEXT: RVA: 0x150
+DYNRELOCS-NEXT: Type: VALUE
+DYNRELOCS-NEXT: Size: 0x4
+DYNRELOCS-NEXT: Value: 0x3150
+DYNRELOCS-NEXT: ]
+DYNRELOCS-NEXT: Entry [
+DYNRELOCS-NEXT: RVA: 0x154
+DYNRELOCS-NEXT: Type: VALUE
+DYNRELOCS-NEXT: Size: 0x4
+DYNRELOCS-NEXT: Value: 0x140
+DYNRELOCS-NEXT: ]
+DYNRELOCS-NEXT: Entry [
+DYNRELOCS-NEXT: RVA: 0x2010
+DYNRELOCS-NEXT: Type: VALUE
+DYNRELOCS-NEXT: Size: 0x4
+DYNRELOCS-NEXT: Value: 0x90000010
+DYNRELOCS-NEXT: ]
+DYNRELOCS-NEXT: Entry [
+DYNRELOCS-NEXT: RVA: 0x2014
+DYNRELOCS-NEXT: Type: VALUE
+DYNRELOCS-NEXT: Size: 0x4
+DYNRELOCS-NEXT: Value: 0x91001210
+DYNRELOCS-NEXT: ]
+DYNRELOCS-NEXT: ]
+DYNRELOCS-NEXT: ]
RUN: lld-link -machine:arm64x -dll -noentry -out:out-cmd.dll loadconfig-arm64.obj loadconfig-arm64ec.obj \
-RUN: func-arm64.obj func-arm64ec.obj -arm64xsameaddress:func
+RUN: func-arm64.obj func-arm64ec.obj ref-arm64.obj ref-arm64ec.obj -arm64xsameaddress:func
+RUN: llvm-objdump -d out-cmd.dll | FileCheck --check-prefix=DISASM %s
+RUN: llvm-readobj --hex-dump=.test out-cmd.dll | FileCheck --check-prefix=TESTSEC %s
+RUN: llvm-readobj --coff-load-config out-cmd.dll | FileCheck --check-prefix=DYNRELOCS %s
+
+RUN: lld-link -machine:arm64x -dll -noentry -out:out-both.dll loadconfig-arm64.obj loadconfig-arm64ec.obj \
+RUN: func-arm64.obj func-arm64ec.obj ref-arm64.obj ref-arm64ec.obj drectve.obj -arm64xsameaddress:func
+RUN: llvm-objdump -d out-both.dll | FileCheck --check-prefix=DISASM %s
+RUN: llvm-readobj --hex-dump=.test out-both.dll | FileCheck --check-prefix=TESTSEC %s
+RUN: llvm-readobj --coff-load-config out-both.dll | FileCheck --check-prefix=DYNRELOCS %s
+
+Check that the thunk is not generated if either of the sameaddress symbols is not live.
+
+RUN: lld-link -machine:arm64x -dll -noentry -out:out-live1.dll loadconfig-arm64.obj loadconfig-arm64ec.obj \
+RUN: func-arm64.obj func-arm64ec.obj ref-arm64ec.obj drectve.obj
+RUN: llvm-objdump -d out-live1.dll | FileCheck --check-prefix=DISASM-LIVE1 %s
+DISASM-LIVE1: 0000000180001000 <.text>:
+DISASM-LIVE1-NEXT: 180001000: 00000009 udf #0x9
+DISASM-LIVE1-NEXT: 180001004: d2800040 mov x0, #0x2 // =2
+DISASM-LIVE1-NEXT: 180001008: d65f03c0 ret
+DISASM-LIVE1-NEXT: 18000100c: d2800060 mov x0, #0x3 // =3
+DISASM-LIVE1-NEXT: 180001010: d65f03c0 ret
+DISASM-LIVE1-NOT: br
+
+RUN: lld-link -machine:arm64x -dll -noentry -out:out-live2.dll loadconfig-arm64.obj loadconfig-arm64ec.obj \
+RUN: func-arm64.obj func-arm64ec.obj ref-arm64.obj drectve.obj
+RUN: llvm-objdump -d out-live2.dll | FileCheck --check-prefix=DISASM-LIVE2 %s
+DISASM-LIVE2: 0000000180001000 <.text>:
+DISASM-LIVE2-NEXT: 180001000: d2800020 mov x0, #0x1 // =1
+DISASM-LIVE2-NEXT: 180001004: d65f03c0 ret
+DISASM-LIVE2-NOT: br
RUN: lld-link -machine:arm64ec -dll -noentry -out:out-ec.dll loadconfig-arm64ec.obj func-arm64ec.obj drectve.obj
@@ -20,6 +107,10 @@ RUN: lld-link -machine:arm64x -dll -noentry -out:out-warn.dll loadconfig-arm64.o
RUN: func-arm64.obj func-arm64ec.obj drectve-arm64.obj 2>&1 | FileCheck --check-prefix=WARN %s
WARN: lld-link: warning: -arm64xsameaddress: is not allowed in non-ARM64EC files (drectve-arm64.obj)
+RUN: lld-link -machine:arm64 -dll -noentry -out:out-warn2.dll loadconfig-arm64.obj \
+RUN: func-arm64.obj -arm64xsameaddress:func 2>&1 | FileCheck --check-prefix=WARN2 %s
+WARN2: lld-link: warning: -arm64xsameaddress: is allowed only on EC targets
+
#--- func-arm64.s
.section .text,"xr",discard,func
.globl func
@@ -27,6 +118,10 @@ func:
mov x0, #1
ret
+#--- ref-arm64.s
+ .section .test,"dr"
+ .rva func
+
#--- func-arm64ec.s
.section .text,"xr",discard,"#func"
.globl "#func"
@@ -43,14 +138,16 @@ entry_thunk:
mov x0, #3
ret
- .section .test,"dr"
- .rva func
-
.section .hybmp$x,"yi"
.symidx "#func"
.symidx entry_thunk
.word 1
+#--- ref-arm64ec.s
+ .section .test,"dr"
+ .rva func
+ .rva "#func"
+
#--- drectve.s
.section .drectve, "yn"
.ascii " -arm64xsameaddress:func"
diff --git a/lld/test/ELF/dtlto/archive-thin.test b/lld/test/ELF/dtlto/archive-thin.test
new file mode 100644
index 0000000..bcd5f13
--- /dev/null
+++ b/lld/test/ELF/dtlto/archive-thin.test
@@ -0,0 +1,65 @@
+REQUIRES: x86
+
+## Test that a DTLTO link assigns Module IDs to thin archive members as expected.
+
+RUN: rm -rf %t && split-file %s %t && cd %t
+
+RUN: sed 's/@t1/@t2/g' t1.ll > t2.ll
+RUN: sed 's/@t1/@t3/g' t1.ll > t3.ll
+
+RUN: opt -thinlto-bc t1.ll -o t1.bc
+RUN: opt -thinlto-bc t2.ll -o t2.bc
+RUN: opt -thinlto-bc t3.ll -o t3.bc
+
+RUN: llvm-ar rcs t1.a t1.bc --thin
+## Create this bitcode thin archive in a subdirectory to test the expansion of
+## the path to a bitcode file that is referenced using "..", e.g., in this case
+## "../t2.bc".
+RUN: mkdir lib
+RUN: llvm-ar rcs lib/t2.a t2.bc --thin
+## Create this bitcode thin archive with an absolute path entry containing "..".
+RUN: llvm-ar rcs t3.a %t/lib/../t3.bc --thin
+
+## Link from a different directory to ensure that thin archive member paths are
+## resolved correctly relative to the archive locations.
+RUN: mkdir %t/out && cd %t/out
+
+## Build a response file to share common linking arguments.
+## Note: validate.py does not perform any compilation. Instead, it validates the
+## received JSON, pretty-prints the JSON and the supplied arguments, and then
+## exits with an error. This allows FileCheck directives to verify the
+## distributor inputs.
+RUN: echo '%t/t1.a %t/lib/t2.a ../t3.a \
+RUN: --thinlto-distributor="%python" \
+RUN: --thinlto-distributor-arg="%llvm_src_root/utils/dtlto/validate.py"' > rsp
+
+## Link thin archives using -u/--undefined.
+RUN: not ld.lld @rsp -u t1 -u t2 -u t3 2>&1 | FileCheck %s
+
+## Link thin archives using --whole-archive.
+RUN: not ld.lld --whole-archive @rsp 2>&1 | FileCheck %s
+
+## Check the module IDs in the JSON jobs description.
+CHECK: "jobs": [
+CHECK: "inputs": [
+CHECK-NEXT: "{{([a-zA-Z]:)|/}}
+CHECK-SAME: {{/|\\\\}}archive-thin.test.tmp{{/|\\\\}}t1.bc"
+
+CHECK: "inputs": [
+CHECK-NEXT: "{{([a-zA-Z]\:)|/}}
+CHECK-SAME: {{/|\\\\}}archive-thin.test.tmp{{/|\\\\}}t2.bc"
+
+CHECK: "inputs": [
+CHECK-NEXT: "{{([a-zA-Z]:)|/}}
+CHECK-SAME: {{/|\\\\}}archive-thin.test.tmp{{/|\\\\}}t3.bc"
+
+## Ensure backend compilation fails as expected (due to validate.py dummy behavior).
+CHECK: error: DTLTO backend compilation: cannot open native object file:
+
+#--- t1.ll
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @t1() {
+ ret void
+}
diff --git a/lld/test/ELF/loongarch-pc-hi20-lo12-got.s b/lld/test/ELF/loongarch-pc-hi20-lo12-got.s
new file mode 100644
index 0000000..acd9400
--- /dev/null
+++ b/lld/test/ELF/loongarch-pc-hi20-lo12-got.s
@@ -0,0 +1,145 @@
+# REQUIRES: loongarch
+# RUN: rm -rf %t && split-file %s %t && cd %t
+
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 a.s -o a.o
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 unpaired.s -o unpaired.o
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 lone-ldr.s -o lone-ldr.o
+
+# RUN: ld.lld a.o -T within-range.t -o a
+# RUN: llvm-objdump -d --no-show-raw-insn a | FileCheck %s
+
+## This test verifies the encoding when the register $a0 is used.
+# CHECK: pcalau12i $a0, 0
+# CHECK-NEXT: addi.d $a0, $a0, -2048
+
+## PCALAU12I contains a nonzero addend, no relaxations should be applied.
+# CHECK-NEXT: pcalau12i $a1, 2
+# CHECK-NEXT: ld.d $a1, $a1, -2048
+
+## LD contains a nonzero addend, no relaxations should be applied.
+# CHECK-NEXT: pcalau12i $a2, 2
+# CHECK-NEXT: ld.d $a2, $a2, -2040
+
+## PCALAU12I and LD use different registers, no relaxations should be applied.
+# CHECK-NEXT: pcalau12i $a3, 2
+# CHECK-NEXT: ld.d $a4, $a3, -2048
+
+## PCALAU12I and LD use different registers, no relaxations should be applied.
+# CHECK-NEXT: pcalau12i $a5, 2
+# CHECK-NEXT: ld.d $a5, $a6, -2048
+
+# RUN: ld.lld a.o -T underflow-range.t -o a-underflow
+# RUN: llvm-objdump -d --no-show-raw-insn a-underflow | FileCheck --check-prefix=OUTRANGE %s
+
+# RUN: ld.lld a.o -T overflow-range.t -o a-overflow
+# RUN: llvm-objdump -d --no-show-raw-insn a-overflow | FileCheck --check-prefix=OUTRANGE %s
+
+# OUTRANGE: pcalau12i $a0, 1
+# OUTRANGE-NEXT: ld.d $a0, $a0, 0
+
+## Relocations do not appear in pairs, no relaxations should be applied.
+# RUN: ld.lld unpaired.o -T within-range.t -o unpaired
+# RUN: llvm-objdump --no-show-raw-insn -d unpaired | FileCheck --check-prefix=UNPAIRED %s
+
+# UNPAIRED: pcalau12i $a0, 2
+# UNPAIRED-NEXT: b 8
+# UNPAIRED-NEXT: pcalau12i $a0, 2
+# UNPAIRED: ld.d $a0, $a0, -2048
+
+## Relocations do not appear in pairs, no relaxations should be applied.
+# RUN: ld.lld lone-ldr.o -T within-range.t -o lone-ldr
+# RUN: llvm-objdump --no-show-raw-insn -d lone-ldr | FileCheck --check-prefix=LONE-LDR %s
+
+# LONE-LDR: ld.d $a0, $a0, -2048
+
+## 32-bit code is mostly the same. We only test a few variants.
+# RUN: llvm-mc --filetype=obj --triple=loongarch32 a.32.s -o a.32.o
+# RUN: ld.lld a.32.o -T within-range.t -o a32
+# RUN: llvm-objdump -d --no-show-raw-insn a32 | FileCheck --check-prefix=CHECK32 %s
+
+## This test verifies the encoding when the register $a0 is used.
+# CHECK32: pcalau12i $a0, 0
+# CHECK32-NEXT: addi.w $a0, $a0, -2048
+
+
+## This linker script ensures that .rodata and .text are sufficiently close to
+## each other so that the pcalau12i + ld pair can be relaxed to pcalau12i + add.
+#--- within-range.t
+SECTIONS {
+ .rodata 0x1800: { *(.rodata) }
+ .text 0x2800: { *(.text) }
+ .got 0x3800: { *(.got) }
+}
+
+## This linker script ensures that .rodata and .text are sufficiently far apart
+## so that the pcalau12i + ld pair cannot be relaxed to pcalau12i + add.
+#--- underflow-range.t
+SECTIONS {
+ .rodata 0x800-4: { *(.rodata) }
+ .got 0x80002000: { *(.got) }
+ .text 0x80001000: { *(.text) } /* (0x800-4)+2GB+0x800+4 */
+}
+
+#--- overflow-range.t
+SECTIONS {
+ .text 0x1000: { *(.text) }
+ .got 0x2000: { *(.got) }
+ .rodata 0x80000800: { *(.rodata) } /* 0x1000+2GB-0x800 */
+}
+
+#--- a.s
+## Symbol 'x' is nonpreemptible, the optimization should be applied.
+.rodata
+.hidden x
+x:
+.word 10
+
+.text
+.global _start
+_start:
+ pcalau12i $a0, %got_pc_hi20(x)
+ ld.d $a0, $a0, %got_pc_lo12(x)
+ pcalau12i $a1, %got_pc_hi20(x+1)
+ ld.d $a1, $a1, %got_pc_lo12(x)
+ pcalau12i $a2, %got_pc_hi20(x)
+ ld.d $a2, $a2, %got_pc_lo12(x+8)
+ pcalau12i $a3, %got_pc_hi20(x)
+ ld.d $a4, $a3, %got_pc_lo12(x)
+ pcalau12i $a5, %got_pc_hi20(x)
+ ld.d $a5, $a6, %got_pc_lo12(x)
+
+#--- unpaired.s
+.text
+.hidden x
+x:
+ nop
+.global _start
+_start:
+ pcalau12i $a0, %got_pc_hi20(x)
+ b L
+ pcalau12i $a0, %got_pc_hi20(x)
+L:
+ ld.d $a0, $a0, %got_pc_lo12(x)
+
+#--- lone-ldr.s
+.text
+.hidden x
+x:
+ nop
+.global _start
+_start:
+ ld.d $a0, $a0, %got_pc_lo12(x)
+
+
+#--- a.32.s
+## Symbol 'x' is nonpreemptible, the optimization should be applied.
+.rodata
+.hidden x
+x:
+.word 10
+
+.text
+.global _start
+_start:
+ pcalau12i $a0, %got_pc_hi20(x)
+ ld.w $a0, $a0, %got_pc_lo12(x)
diff --git a/lld/test/ELF/loongarch-relax-pc-hi20-lo12.s b/lld/test/ELF/loongarch-relax-pc-hi20-lo12.s
index a33f866..08d5d3e 100644
--- a/lld/test/ELF/loongarch-relax-pc-hi20-lo12.s
+++ b/lld/test/ELF/loongarch-relax-pc-hi20-lo12.s
@@ -31,24 +31,26 @@
## offset = 0x410000 - 0x10000: 0x400 pages, page offset 0
# NORELAX32-NEXT: 10000: pcalau12i $a0, 1024
# NORELAX32-NEXT: addi.w $a0, $a0, 0
+## Not relaxation, but conversion to PCRel.
# NORELAX32-NEXT: pcalau12i $a0, 1024
-# NORELAX32-NEXT: ld.w $a0, $a0, 4
+# NORELAX32-NEXT: addi.w $a0, $a0, 0
# NORELAX32-NEXT: pcalau12i $a0, 1024
# NORELAX32-NEXT: addi.w $a0, $a0, 0
# NORELAX32-NEXT: pcalau12i $a0, 1024
-# NORELAX32-NEXT: ld.w $a0, $a0, 4
+# NORELAX32-NEXT: addi.w $a0, $a0, 0
# NORELAX64-LABEL: <_start>:
## offset exceed range of pcaddi
## offset = 0x410000 - 0x10000: 0x400 pages, page offset 0
# NORELAX64-NEXT: 10000: pcalau12i $a0, 1024
# NORELAX64-NEXT: addi.d $a0, $a0, 0
+## Not relaxation, but conversion to PCRel.
# NORELAX64-NEXT: pcalau12i $a0, 1024
-# NORELAX64-NEXT: ld.d $a0, $a0, 8
+# NORELAX64-NEXT: addi.d $a0, $a0, 0
# NORELAX64-NEXT: pcalau12i $a0, 1024
# NORELAX64-NEXT: addi.d $a0, $a0, 0
# NORELAX64-NEXT: pcalau12i $a0, 1024
-# NORELAX64-NEXT: ld.d $a0, $a0, 8
+# NORELAX64-NEXT: addi.d $a0, $a0, 0
## GOT references with non-zero addends. No relaxation.
diff --git a/lldb/bindings/interface/SBThreadExtensions.i b/lldb/bindings/interface/SBThreadExtensions.i
index 267faad..4ec9f10 100644
--- a/lldb/bindings/interface/SBThreadExtensions.i
+++ b/lldb/bindings/interface/SBThreadExtensions.i
@@ -45,6 +45,9 @@ STRING_EXTENSION_OUTSIDE(SBThread)
frames.append(frame)
return frames
+ def get_stop_description(self):
+ return self.GetStopDescription(1024)
+
def get_stop_reason_data(self):
return [
self.GetStopReasonDataAtIndex(idx)
@@ -69,6 +72,7 @@ STRING_EXTENSION_OUTSIDE(SBThread)
name = property(GetName, None, doc='''A read only property that returns the name of this thread as a string.''')
queue = property(GetQueueName, None, doc='''A read only property that returns the dispatch queue name of this thread as a string.''')
queue_id = property(GetQueueID, None, doc='''A read only property that returns the dispatch queue id of this thread as an integer.''')
+ stop_description = property(get_stop_description, None, doc='''A read only property that returns a string describing the reason this thread stopped.''')
stop_reason = property(GetStopReason, None, doc='''A read only property that returns an lldb enumeration value (see enumerations that start with "lldb.eStopReason") that represents the reason this thread stopped.''')
stop_reason_data = property(get_stop_reason_data, None, doc='''A read only property that returns the stop reason data as a list.''')
is_suspended = property(IsSuspended, None, doc='''A read only property that returns a boolean value that indicates if this thread is suspended.''')
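
The new property simply wraps `GetStopDescription(1024)`, so scripts can read a thread's stop reason as a plain attribute. A minimal usage sketch, assuming an existing lldb Python session with a stopped process (the surrounding setup is hypothetical):

```python
# Sketch: read the new stop_description property from a script.
# Assumes `process` is a stopped lldb.SBProcess obtained elsewhere.
import lldb

def report_stops(process: lldb.SBProcess) -> None:
    for thread in process:
        # Equivalent to thread.GetStopDescription(1024).
        print(thread.GetThreadID(), thread.stop_description)
```
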
diff --git a/lldb/bindings/python/python-extensions.swig b/lldb/bindings/python/python-extensions.swig
index 4ba1607..40fa768 100644
--- a/lldb/bindings/python/python-extensions.swig
+++ b/lldb/bindings/python/python-extensions.swig
@@ -594,6 +594,7 @@ def is_numeric_type(basic_type):
if basic_type == eBasicTypeFloat: return (True,True)
if basic_type == eBasicTypeDouble: return (True,True)
if basic_type == eBasicTypeLongDouble: return (True,True)
+ if basic_type == eBasicTypeFloat128: return (True,True)
if basic_type == eBasicTypeFloatComplex: return (True,True)
if basic_type == eBasicTypeDoubleComplex: return (True,True)
if basic_type == eBasicTypeLongDoubleComplex: return (True,True)
diff --git a/lldb/docs/python_api_enums.rst b/lldb/docs/python_api_enums.rst
index b6a2497..a43a47b 100644
--- a/lldb/docs/python_api_enums.rst
+++ b/lldb/docs/python_api_enums.rst
@@ -321,6 +321,7 @@ Format
.. py:data:: eFormatInstruction
.. py:data:: eFormatVoid
.. py:data:: eFormatUnicode8
+.. py:data:: eFormatFloat128
.. _DescriptionLevel:
@@ -1045,6 +1046,7 @@ BasicType
.. py:data:: eBasicTypeObjCSel
.. py:data:: eBasicTypeNullPtr
.. py:data:: eBasicTypeOther
+.. py:data:: eBasicTypeFloat128
.. _TraceType:
diff --git a/lldb/docs/resources/lldbgdbremote.md b/lldb/docs/resources/lldbgdbremote.md
index 41628cf..36b95f1 100644
--- a/lldb/docs/resources/lldbgdbremote.md
+++ b/lldb/docs/resources/lldbgdbremote.md
@@ -1998,22 +1998,6 @@ threads (live system debug) / cores (JTAG) in your program have
stopped and allows LLDB to display and control your program
correctly.
-## qWasmCallStack
-
-Get the Wasm call stack for the given thread id. This returns a hex-encoded
-list of PC values, one for each frame of the call stack. To match the Wasm
-specification, the addresses are encoded in little endian byte order, even if
-the endian of the Wasm runtime's host is not little endian.
-
-```
-send packet: $qWasmCallStack:202dbe040#08
-read packet: $9c01000000000040e501000000000040fe01000000000040#
-```
-
-**Priority to Implement:** Only required for Wasm support. This packed is
-supported by the [WAMR](https://github.com/bytecodealliance/wasm-micro-runtime)
-and [V8](https://v8.dev) Wasm runtimes.
-
## qWatchpointSupportInfo
Get the number of hardware watchpoints available on the remote target.
@@ -2479,3 +2463,70 @@ omitting them will work fine; these numbers are always base 16.
The length of the payload is not provided. A reliable, 8-bit clean,
transport layer is assumed.
+
+## Wasm Packets
+
+The packets below are supported by the
+[WAMR](https://github.com/bytecodealliance/wasm-micro-runtime) and
+[V8](https://v8.dev) Wasm runtimes.
+
+
+### qWasmCallStack
+
+Get the Wasm call stack for the given thread id. This returns a hex-encoded
+list of PC values, one for each frame of the call stack. To match the Wasm
+specification, the addresses are encoded in little endian byte order, even if
+the endianness of the Wasm runtime's host is not little endian.
+
+```
+send packet: $qWasmCallStack:202dbe040#08
+read packet: $9c01000000000040e501000000000040fe01000000000040#
+```
+
+**Priority to Implement:** Only required for Wasm support. Necessary to show
+stack traces.
+
+### qWasmGlobal
+
+Get the value of a Wasm global variable for the given frame index at the given
+variable index. The indexes are encoded as base 10. The result is a hex-encoded
+address from where to read the value.
+
+```
+send packet: $qWasmGlobal:0;2#cb
+read packet: $e0030100#b9
+```
+
+**Priority to Implement:** Only required for Wasm support. Necessary to show
+variables.
+
+
+### qWasmLocal
+
+Get the value of a Wasm function argument or local variable for the given frame
+index at the given variable index. The indexes are encoded as base 10. The
+result is a hex-encoded address from where to read the value.
+
+
+```
+send packet: $qWasmLocal:0;2#cb
+read packet: $e0030100#b9
+```
+
+**Priority to Implement:** Only required for Wasm support. Necessary to show
+variables.
+
+
+### qWasmStackValue
+
+Get the value of an entry in the Wasm operand stack, for the given frame
+index at the given entry index. The indexes are encoded as base 10. The
+result is a hex-encoded address from where to read the value.
+
+```
+send packet: $qWasmStackValue:0;2#cb
+read packet: $e0030100#b9
+```
+
+**Priority to Implement:** Only required for Wasm support. Necessary to show
+variables.
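
Since all three value packets return a hex-encoded little-endian payload, a client can share one decoding helper. The sketch below decodes a `qWasmCallStack` reply, assuming packet framing and checksums have already been stripped; the helper name is illustrative:

```python
# Sketch: decode a qWasmCallStack payload (hex string of little-endian
# 64-bit PCs) into a list of integers. Framing/checksum handling is
# assumed to happen elsewhere.
def decode_wasm_call_stack(payload: str) -> list[int]:
    raw = bytes.fromhex(payload)
    return [int.from_bytes(raw[i:i + 8], "little")
            for i in range(0, len(raw), 8)]

# Using the example reply above:
# decode_wasm_call_stack("9c01000000000040e501000000000040fe01000000000040")
# -> [0x400000000000019c, 0x40000000000001e5, 0x40000000000001fe]
```
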
diff --git a/lldb/docs/use/formatting.rst b/lldb/docs/use/formatting.rst
index 39ccfed..c5a880c 100644
--- a/lldb/docs/use/formatting.rst
+++ b/lldb/docs/use/formatting.rst
@@ -344,19 +344,19 @@ E.g., the following setting would reconstruct the entire function name (and is L
::
- (lldb) settings set plugin.cplusplus.dislpay.function-name-format "${function.return-left}${function.scope}${function.basename}${function.template-arguments}${function.formatted-arguments}${function.qualifiers}${function.return-right}${function.suffix}"
+ (lldb) settings set plugin.cplusplus.display.function-name-format "${function.return-left}${function.scope}${function.basename}${function.template-arguments}${function.formatted-arguments}${function.qualifiers}${function.return-right}${function.suffix}"
If a user wanted to only print the name and arguments of a C++ function one could do:
::
- (lldb) settings set plugin.cplusplus.dislpay.function-name-format "${function.scope}${function.basename}${function.formatted-arguments}"
+ (lldb) settings set plugin.cplusplus.display.function-name-format "${function.scope}${function.basename}${function.formatted-arguments}"
Then the following would highlight just the basename in green:
::
- (lldb) settings set plugin.cplusplus.dislpay.function-name-format "${function.scope}${ansi.fg.yellow}${function.basename}${ansi.normal}${function.formatted-arguments}"
+ (lldb) settings set plugin.cplusplus.display.function-name-format "${function.scope}${ansi.fg.yellow}${function.basename}${ansi.normal}${function.formatted-arguments}"
The ``${function.name-with-args}`` by default asks the language plugin whether it supports a language-specific ``function-name-format`` (e.g., the ``plugin.cplusplus.display.function-name-format`` for C++), and if it does, uses it. Otherwise it will display the demangled function name.
diff --git a/lldb/include/lldb/Core/Module.h b/lldb/include/lldb/Core/Module.h
index 8bb55c9..8513e14 100644
--- a/lldb/include/lldb/Core/Module.h
+++ b/lldb/include/lldb/Core/Module.h
@@ -86,7 +86,8 @@ struct ModuleFunctionSearchOptions {
///
/// The module will parse more detailed information as more queries are made.
class Module : public std::enable_shared_from_this<Module>,
- public SymbolContextScope {
+ public SymbolContextScope,
+ public UserID {
public:
class LookupInfo;
// Static functions that can track the lifetime of module objects. This is
diff --git a/lldb/include/lldb/Core/ModuleList.h b/lldb/include/lldb/Core/ModuleList.h
index d5e291f3..6ecdcf1 100644
--- a/lldb/include/lldb/Core/ModuleList.h
+++ b/lldb/include/lldb/Core/ModuleList.h
@@ -352,6 +352,14 @@ public:
// UUID values is very efficient and accurate.
lldb::ModuleSP FindModule(const UUID &uuid) const;
+ /// Find a module by LLDB-specific unique identifier.
+ ///
+ /// \param[in] uid The UID of the module assigned to it on construction.
+ ///
+ /// \returns ModuleSP of module with \c uid. Returns nullptr if no such
+ /// module could be found.
+ lldb::ModuleSP FindModule(lldb::user_id_t uid) const;
+
/// Finds the first module whose file specification matches \a module_spec.
lldb::ModuleSP FindFirstModule(const ModuleSpec &module_spec) const;
diff --git a/lldb/include/lldb/Expression/Expression.h b/lldb/include/lldb/Expression/Expression.h
index 8de9364..20067f4 100644
--- a/lldb/include/lldb/Expression/Expression.h
+++ b/lldb/include/lldb/Expression/Expression.h
@@ -13,6 +13,7 @@
#include <string>
#include <vector>
+#include "llvm/Support/FormatProviders.h"
#include "lldb/Expression/ExpressionTypeSystemHelper.h"
#include "lldb/lldb-forward.h"
@@ -96,6 +97,62 @@ protected:
///invalid.
};
+/// Holds parsed information about a function call label that
+/// LLDB attaches as an AsmLabel to function AST nodes it parses
+/// from debug-info.
+///
+/// The format is:
+///
+/// <prefix>:<module uid>:<symbol uid>:<name>
+///
+/// The label string needs to stay valid for the entire lifetime
+/// of this object.
+struct FunctionCallLabel {
+ /// Unique identifier of the lldb_private::Module
+ /// which contains the symbol identified by \c symbol_id.
+ lldb::user_id_t module_id;
+
+ /// Unique identifier of the function symbol on which to
+ /// perform the function call. For example, for DWARF this would
+ /// be the DIE UID.
+ lldb::user_id_t symbol_id;
+
+ /// Name to use when searching for the function symbol in
+ /// \c module_id. For most function calls this will be a
+ /// mangled name. In cases where a mangled name can't be used,
+ /// this will be the function name.
+ ///
+ /// NOTE: kept as last element so we don't have to worry about
+ /// ':' in the mangled name when parsing the label.
+ llvm::StringRef lookup_name;
+
+ /// Decodes the specified function \c label into a \c FunctionCallLabel.
+ static llvm::Expected<FunctionCallLabel> fromString(llvm::StringRef label);
+
+ /// Encode this FunctionCallLabel into its string representation.
+ ///
+ /// The representation roundtrips through \c fromString:
+ /// \code{.cpp}
+ /// llvm::StringRef encoded = "$__lldb_func:0x0:0x0:_Z3foov";
+/// FunctionCallLabel label = *fromString(encoded);
+ ///
+ /// assert (label.toString() == encoded);
+ /// assert (*fromString(label.toString()) == label);
+ /// \endcode
+ std::string toString() const;
+};
+
+/// LLDB attaches this prefix to mangled names of functions that get called
+/// from JITted expressions.
+inline constexpr llvm::StringRef FunctionCallLabelPrefix = "$__lldb_func";
+
} // namespace lldb_private
+namespace llvm {
+template <> struct format_provider<lldb_private::FunctionCallLabel> {
+ static void format(const lldb_private::FunctionCallLabel &label,
+ raw_ostream &OS, StringRef Style);
+};
+} // namespace llvm
+
#endif // LLDB_EXPRESSION_EXPRESSION_H
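
For illustration, the parsing rule documented above can be mirrored in a few lines: split on ':' at most three times so the trailing mangled name may itself contain colons. A sketch, not the shipped implementation (names are illustrative):

```python
# Sketch of the label format described above. Mirrors the MaxSplit=3 rule
# so ':' inside the mangled name is preserved.
PREFIX = "$__lldb_func"

def parse_label(label: str) -> tuple[int, int, str]:
    parts = label.split(":", 3)  # at most 4 components
    if len(parts) != 4 or parts[0] != PREFIX:
        raise ValueError("malformed function call label")
    # Module and symbol IDs are emitted as hex by toString().
    return int(parts[1], 16), int(parts[2], 16), parts[3]

assert parse_label("$__lldb_func:0x0:0x0:_Z3foov") == (0, 0, "_Z3foov")
```
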
diff --git a/lldb/include/lldb/Symbol/SymbolFile.h b/lldb/include/lldb/Symbol/SymbolFile.h
index e95f955..bbc615d 100644
--- a/lldb/include/lldb/Symbol/SymbolFile.h
+++ b/lldb/include/lldb/Symbol/SymbolFile.h
@@ -18,6 +18,7 @@
#include "lldb/Symbol/CompilerType.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/SourceModule.h"
+#include "lldb/Symbol/SymbolContext.h"
#include "lldb/Symbol/Type.h"
#include "lldb/Symbol/TypeList.h"
#include "lldb/Symbol/TypeSystem.h"
@@ -328,6 +329,18 @@ public:
GetMangledNamesForFunction(const std::string &scope_qualified_name,
std::vector<ConstString> &mangled_names);
+ /// Resolves the function corresponding to the specified LLDB function
+ /// call \c label.
+ ///
+ /// \param[in] label The FunctionCallLabel to be resolved.
+ ///
+ /// \returns An llvm::Error if the specified \c label couldn't be resolved.
+ /// Returns the resolved function (as a SymbolContext) otherwise.
+ virtual llvm::Expected<SymbolContext>
+ ResolveFunctionCallLabel(const FunctionCallLabel &label) {
+ return llvm::createStringError("Not implemented");
+ }
+
virtual void GetTypes(lldb_private::SymbolContextScope *sc_scope,
lldb::TypeClass type_mask,
lldb_private::TypeList &type_list) = 0;
diff --git a/lldb/include/lldb/Symbol/TypeSystem.h b/lldb/include/lldb/Symbol/TypeSystem.h
index cb1f013..16a2e0b 100644
--- a/lldb/include/lldb/Symbol/TypeSystem.h
+++ b/lldb/include/lldb/Symbol/TypeSystem.h
@@ -310,7 +310,8 @@ public:
// Exploring the type
- virtual const llvm::fltSemantics &GetFloatTypeSemantics(size_t byte_size) = 0;
+ virtual const llvm::fltSemantics &
+ GetFloatTypeSemantics(size_t byte_size, lldb::Format format) = 0;
virtual llvm::Expected<uint64_t>
GetBitSize(lldb::opaque_compiler_type_t type,
diff --git a/lldb/include/lldb/Target/Process.h b/lldb/include/lldb/Target/Process.h
index 7e66e31..dc75d98 100644
--- a/lldb/include/lldb/Target/Process.h
+++ b/lldb/include/lldb/Target/Process.h
@@ -100,6 +100,7 @@ public:
void SetStopOnSharedLibraryEvents(bool stop);
bool GetDisableLangRuntimeUnwindPlans() const;
void SetDisableLangRuntimeUnwindPlans(bool disable);
+ void DisableLanguageRuntimeUnwindPlansCallback();
bool GetDetachKeepsStopped() const;
void SetDetachKeepsStopped(bool keep_stopped);
bool GetWarningsOptimization() const;
diff --git a/lldb/include/lldb/lldb-enumerations.h b/lldb/include/lldb/lldb-enumerations.h
index 171a650..c63c1f0 100644
--- a/lldb/include/lldb/lldb-enumerations.h
+++ b/lldb/include/lldb/lldb-enumerations.h
@@ -198,11 +198,15 @@ enum Format {
///< character arrays that can contain non printable
///< characters
eFormatAddressInfo, ///< Describe what an address points to (func + offset
- ///< with file/line, symbol + offset, data, etc)
- eFormatHexFloat, ///< ISO C99 hex float string
- eFormatInstruction, ///< Disassemble an opcode
- eFormatVoid, ///< Do not print this
+ ///< with file/line, symbol + offset, data, etc)
+ eFormatHexFloat, ///< ISO C99 hex float string
+ eFormatInstruction, ///< Disassemble an opcode
+ eFormatVoid, ///< Do not print this
eFormatUnicode8,
+ eFormatFloat128, ///< Disambiguate between 128-bit `long double` (which uses
+ ///< `eFormatFloat`) and `__float128` (which uses
+ ///< `eFormatFloat128`). If the value being formatted is not
+ ///< 128 bits, then this is identical to `eFormatFloat`.
kNumFormats
};
@@ -838,7 +842,8 @@ enum BasicType {
eBasicTypeObjCClass,
eBasicTypeObjCSel,
eBasicTypeNullPtr,
- eBasicTypeOther
+ eBasicTypeOther,
+ eBasicTypeFloat128
};
/// Deprecated
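
Once the new enumerators reach the SB API, a script could opt a 16-byte value into IEEE quad formatting explicitly. A hedged sketch, assuming the SWIG bindings expose `eFormatFloat128` as the enum addition implies:

```python
# Sketch: display a 128-bit value as __float128 rather than long double.
# Assumes `value` is a valid lldb.SBValue for a 16-byte floating type.
import lldb

def as_float128(value: lldb.SBValue) -> str:
    value.SetFormat(lldb.eFormatFloat128)
    return value.GetValue()
```
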
diff --git a/lldb/packages/Python/lldbsuite/support/temp_file.py b/lldb/packages/Python/lldbsuite/support/temp_file.py
new file mode 100644
index 0000000..a21e212
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/support/temp_file.py
@@ -0,0 +1,23 @@
+"""
+Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+See https://llvm.org/LICENSE.txt for license information.
+SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""
+
+import os
+import tempfile
+
+
+class OnDiskTempFile:
+ def __init__(self):
+ self.path = None
+
+ def __enter__(self):
+ fd, path = tempfile.mkstemp()
+ os.close(fd)
+ self.path = path
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if os.path.exists(self.path):
+ os.remove(self.path)
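
Unlike `tempfile.NamedTemporaryFile`, `OnDiskTempFile` closes the descriptor immediately, so external tools (a compiler writing to `-o`, for instance) can reopen the path on platforms like Windows where an open handle blocks reuse. A usage sketch:

```python
# Usage sketch: the file exists on disk for the duration of the block and
# is removed on exit; only the path is handed around, never an open handle.
from lldbsuite.support.temp_file import OnDiskTempFile

with OnDiskTempFile() as f:
    print(f.path)  # pass f.path to an external tool, e.g. a compiler
```
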
diff --git a/lldb/packages/Python/lldbsuite/test/builders/__init__.py b/lldb/packages/Python/lldbsuite/test/builders/__init__.py
index 9dd82cb..7331526 100644
--- a/lldb/packages/Python/lldbsuite/test/builders/__init__.py
+++ b/lldb/packages/Python/lldbsuite/test/builders/__init__.py
@@ -8,7 +8,15 @@ factory method below hands out builders based on the given platform.
def get_builder(platform):
"""Returns a Builder instance for the given platform."""
- if platform == "darwin":
+ if platform in [
+ "bridgeos",
+ "darwin",
+ "ios",
+ "macosx",
+ "tvos",
+ "watchos",
+ "xros",
+ ]:
from .darwin import BuilderDarwin
return BuilderDarwin()
diff --git a/lldb/packages/Python/lldbsuite/test/builders/builder.py b/lldb/packages/Python/lldbsuite/test/builders/builder.py
index ada6f9f..96c7b39 100644
--- a/lldb/packages/Python/lldbsuite/test/builders/builder.py
+++ b/lldb/packages/Python/lldbsuite/test/builders/builder.py
@@ -26,7 +26,7 @@ class Builder:
def getTriple(self, arch):
"""Returns the triple for the given architecture or None."""
- return None
+ return configuration.triple
def getExtraMakeArgs(self):
"""
@@ -37,6 +37,9 @@ class Builder:
def getArchCFlags(self, architecture):
"""Returns the ARCH_CFLAGS for the make system."""
+ triple = self.getTriple(architecture)
+ if triple:
+ return ["ARCH_CFLAGS=-target {}".format(triple)]
return []
def getMake(self, test_subdir, test_name):
diff --git a/lldb/packages/Python/lldbsuite/test/configuration.py b/lldb/packages/Python/lldbsuite/test/configuration.py
index b2d91fd2..5e38109 100644
--- a/lldb/packages/Python/lldbsuite/test/configuration.py
+++ b/lldb/packages/Python/lldbsuite/test/configuration.py
@@ -45,6 +45,9 @@ dsymutil = None
sdkroot = None
make_path = None
+# Allow specifying a triple for cross compilation.
+triple = None
+
# The overridden DWARF version.
# Don't use this to test the current compiler's
# DWARF version, as this won't be set if the
@@ -141,6 +144,7 @@ enabled_plugins = []
# Typical values include Debug, Release, RelWithDebInfo and MinSizeRel
cmake_build_type = None
+
def shouldSkipBecauseOfCategories(test_categories):
if use_categories:
if (
diff --git a/lldb/packages/Python/lldbsuite/test/decorators.py b/lldb/packages/Python/lldbsuite/test/decorators.py
index a5f5837..bd10bcc 100644
--- a/lldb/packages/Python/lldbsuite/test/decorators.py
+++ b/lldb/packages/Python/lldbsuite/test/decorators.py
@@ -20,6 +20,7 @@ from . import configuration
from . import test_categories
from . import lldbtest_config
from lldbsuite.support import funcutils
+from lldbsuite.support import temp_file
from lldbsuite.test import lldbplatform
from lldbsuite.test import lldbplatformutil
@@ -94,22 +95,23 @@ def _match_decorator_property(expected, actual):
def _compiler_supports(
- compiler, flag, source="int main() {}", output_file=tempfile.NamedTemporaryFile()
+ compiler, flag, source="int main() {}", output_file=temp_file.OnDiskTempFile()
):
"""Test whether the compiler supports the given flag."""
- if platform.system() == "Darwin":
- compiler = "xcrun " + compiler
- try:
- cmd = "echo '%s' | %s %s -x c -o %s -" % (
- source,
- compiler,
- flag,
- output_file.name,
- )
- subprocess.check_call(cmd, shell=True)
- except subprocess.CalledProcessError:
- return False
- return True
+ with output_file:
+ if platform.system() == "Darwin":
+ compiler = "xcrun " + compiler
+ try:
+ cmd = "echo '%s' | %s %s -x c -o %s -" % (
+ source,
+ compiler,
+ flag,
+ output_file.path,
+ )
+ subprocess.check_call(cmd, shell=True)
+ except subprocess.CalledProcessError:
+ return False
+ return True
def expectedFailureIf(condition, bugnumber=None):
@@ -876,19 +878,19 @@ def skipUnlessSupportedTypeAttribute(attr):
def compiler_doesnt_support_struct_attribute():
compiler_path = lldbplatformutil.getCompiler()
- f = tempfile.NamedTemporaryFile()
- cmd = [lldbplatformutil.getCompiler(), "-x", "c++", "-c", "-o", f.name, "-"]
- p = subprocess.Popen(
- cmd,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- universal_newlines=True,
- )
- stdout, stderr = p.communicate("struct __attribute__((%s)) Test {};" % attr)
- if attr in stderr:
- return "Compiler does not support attribute %s" % (attr)
- return None
+ with temp_file.OnDiskTempFile() as f:
+ cmd = [lldbplatformutil.getCompiler(), "-x", "c++", "-c", "-o", f.path, "-"]
+ p = subprocess.Popen(
+ cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
+ stdout, stderr = p.communicate("struct __attribute__((%s)) Test {};" % attr)
+ if attr in stderr:
+ return "Compiler does not support attribute %s" % (attr)
+ return None
return skipTestIfFn(compiler_doesnt_support_struct_attribute)
@@ -902,21 +904,21 @@ def skipUnlessHasCallSiteInfo(func):
if not compiler.startswith("clang"):
return "Test requires clang as compiler"
- f = tempfile.NamedTemporaryFile()
- cmd = (
- "echo 'int main() {}' | "
- "%s -g -glldb -O1 -S -emit-llvm -x c -o %s -" % (compiler_path, f.name)
- )
- if os.popen(cmd).close() is not None:
- return "Compiler can't compile with call site info enabled"
+ with temp_file.OnDiskTempFile() as f:
+ cmd = (
+ "echo 'int main() {}' | "
+ "%s -g -glldb -O1 -S -emit-llvm -x c -o %s -" % (compiler_path, f.path)
+ )
+ if os.popen(cmd).close() is not None:
+ return "Compiler can't compile with call site info enabled"
- with open(f.name, "r") as ir_output_file:
- buf = ir_output_file.read()
+ with open(f.path, "r") as ir_output_file:
+ buf = ir_output_file.read()
- if "DIFlagAllCallsDescribed" not in buf:
- return "Compiler did not introduce DIFlagAllCallsDescribed IR flag"
+ if "DIFlagAllCallsDescribed" not in buf:
+ return "Compiler did not introduce DIFlagAllCallsDescribed IR flag"
- return None
+ return None
return skipTestIfFn(is_compiler_clang_with_call_site_info)(func)
@@ -957,7 +959,7 @@ def skipUnlessUndefinedBehaviorSanitizer(func):
)
# We need to write out the object into a named temp file for inspection.
- outputf = tempfile.NamedTemporaryFile()
+ outputf = temp_file.OnDiskTempFile()
# Try to compile with ubsan turned on.
if not _compiler_supports(
@@ -969,7 +971,7 @@ def skipUnlessUndefinedBehaviorSanitizer(func):
return "Compiler cannot compile with -fsanitize=undefined"
# Check that we actually see ubsan instrumentation in the binary.
- cmd = "nm %s" % outputf.name
+ cmd = "nm %s" % outputf.path
with os.popen(cmd) as nm_output:
if "___ubsan_handle_divrem_overflow" not in nm_output.read():
return "Division by zero instrumentation is missing"
@@ -1037,40 +1039,37 @@ def skipUnlessAArch64MTELinuxCompiler(func):
def is_toolchain_with_mte():
compiler_path = lldbplatformutil.getCompiler()
- f = tempfile.NamedTemporaryFile(delete=False)
- if lldbplatformutil.getPlatform() == "windows":
- return "MTE tests are not compatible with 'windows'"
-
- # Note hostos may be Windows.
- f.close()
+ with temp_file.OnDiskTempFile() as f:
+ if lldbplatformutil.getPlatform() == "windows":
+ return "MTE tests are not compatible with 'windows'"
+
+ cmd = f"{compiler_path} -x c -o {f.path} -"
+ if (
+ subprocess.run(
+ cmd, shell=True, input="int main() {}".encode()
+ ).returncode
+ != 0
+ ):
+ # Cannot compile at all, don't skip the test
+ # so that we report the broken compiler normally.
+ return None
- cmd = f"{compiler_path} -x c -o {f.name} -"
- if (
- subprocess.run(cmd, shell=True, input="int main() {}".encode()).returncode
- != 0
- ):
- os.remove(f.name)
- # Cannot compile at all, don't skip the test
- # so that we report the broken compiler normally.
+ # We need the Linux headers and ACLE MTE intrinsics
+ test_src = """
+ #include <asm/hwcap.h>
+ #include <arm_acle.h>
+ #ifndef HWCAP2_MTE
+ #error
+ #endif
+ int main() {
+ void* ptr = __arm_mte_create_random_tag((void*)(0), 0);
+ }"""
+ cmd = f"{compiler_path} -march=armv8.5-a+memtag -x c -o {f.path} -"
+ res = subprocess.run(cmd, shell=True, input=test_src.encode())
+ if res.returncode != 0:
+ return "Toolchain does not support MTE"
return None
- # We need the Linux headers and ACLE MTE intrinsics
- test_src = """
- #include <asm/hwcap.h>
- #include <arm_acle.h>
- #ifndef HWCAP2_MTE
- #error
- #endif
- int main() {
- void* ptr = __arm_mte_create_random_tag((void*)(0), 0);
- }"""
- cmd = f"{compiler_path} -march=armv8.5-a+memtag -x c -o {f.name} -"
- res = subprocess.run(cmd, shell=True, input=test_src.encode())
- os.remove(f.name)
- if res.returncode != 0:
- return "Toolchain does not support MTE"
- return None
-
return skipTestIfFn(is_toolchain_with_mte)(func)
diff --git a/lldb/packages/Python/lldbsuite/test/dotest.py b/lldb/packages/Python/lldbsuite/test/dotest.py
index 24236e7..47a3c2e 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest.py
@@ -43,6 +43,7 @@ from . import lldbtest_config
from . import test_categories
from . import test_result
from ..support import seven
+from ..support import temp_file
def is_exe(fpath):
@@ -321,8 +322,13 @@ def parseOptionsAndInitTestdirs():
logging.error("No SDK found with the name %s; aborting...", args.apple_sdk)
sys.exit(-1)
+ if args.triple:
+ configuration.triple = args.triple
+
if args.arch:
configuration.arch = args.arch
+ elif args.triple:
+ configuration.arch = args.triple.split("-")[0]
else:
configuration.arch = platform_machine
@@ -780,8 +786,8 @@ def canRunLibcxxTests():
return True, "libc++ always present"
if platform == "linux":
- with tempfile.NamedTemporaryFile() as f:
- cmd = [configuration.compiler, "-xc++", "-stdlib=libc++", "-o", f.name, "-"]
+ with temp_file.OnDiskTempFile() as f:
+ cmd = [configuration.compiler, "-xc++", "-stdlib=libc++", "-o", f.path, "-"]
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
@@ -840,8 +846,8 @@ def canRunMsvcStlTests():
if platform != "windows":
return False, f"Don't know how to build with MSVC's STL on {platform}"
- with tempfile.NamedTemporaryFile() as f:
- cmd = [configuration.compiler, "-xc++", "-o", f.name, "-E", "-"]
+ with temp_file.OnDiskTempFile() as f:
+ cmd = [configuration.compiler, "-xc++", "-o", f.path, "-E", "-"]
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
diff --git a/lldb/packages/Python/lldbsuite/test/dotest_args.py b/lldb/packages/Python/lldbsuite/test/dotest_args.py
index e9c2138..fce9e41 100644
--- a/lldb/packages/Python/lldbsuite/test/dotest_args.py
+++ b/lldb/packages/Python/lldbsuite/test/dotest_args.py
@@ -58,6 +58,14 @@ def create_parser():
"""Specify the path to sysroot. This overrides apple_sdk sysroot."""
),
)
+ group.add_argument(
+ "--triple",
+ metavar="triple",
+ dest="triple",
+ help=textwrap.dedent(
+ """Specify the target triple. Used for cross compilation."""
+ ),
+ )
if sys.platform == "darwin":
group.add_argument(
"--apple-sdk",
diff --git a/lldb/packages/Python/lldbsuite/test/lldbtest.py b/lldb/packages/Python/lldbsuite/test/lldbtest.py
index a74961e..0fc85fc 100644
--- a/lldb/packages/Python/lldbsuite/test/lldbtest.py
+++ b/lldb/packages/Python/lldbsuite/test/lldbtest.py
@@ -1517,7 +1517,7 @@ class Base(unittest.TestCase):
testname = self.getBuildDirBasename()
module = builder_module()
- command = builder_module().getBuildCommand(
+ command = module.getBuildCommand(
debug_info,
architecture,
compiler,
@@ -1778,16 +1778,15 @@ class LLDBTestCaseFactory(type):
attrvalue, "__no_debug_info_test__", False
):
# If any debug info categories were explicitly tagged, assume that list to be
- # authoritative. If none were specified, try with all debug
- # info formats.
+ # authoritative. If none were specified, try with all debug info formats.
+ test_method_categories = set(getattr(attrvalue, "categories", []))
all_dbginfo_categories = set(
test_categories.debug_info_categories.keys()
)
- categories = (
- set(getattr(attrvalue, "categories", [])) & all_dbginfo_categories
- )
- if not categories:
- categories = [
+ dbginfo_categories = test_method_categories & all_dbginfo_categories
+ other_categories = list(test_method_categories - all_dbginfo_categories)
+ if not dbginfo_categories:
+ dbginfo_categories = [
category
for category, can_replicate in test_categories.debug_info_categories.items()
if can_replicate
@@ -1799,9 +1798,8 @@ class LLDBTestCaseFactory(type):
skip_for_debug_info_cat_fn = getattr(
attrvalue, "__skip_for_debug_info_cat_fn__", no_reason
)
- for cat in categories:
+ for cat in dbginfo_categories:
- @decorators.add_test_categories([cat])
@wraps(attrvalue)
def test_method(self, attrvalue=attrvalue):
return attrvalue(self)
@@ -1809,6 +1807,7 @@ class LLDBTestCaseFactory(type):
method_name = attrname + "_" + cat
test_method.__name__ = method_name
test_method.debug_info = cat
+ test_method.categories = other_categories + [cat]
xfail_reason = xfail_for_debug_info_cat_fn(cat)
if xfail_reason:
diff --git a/lldb/packages/Python/lldbsuite/test/make/Makefile.rules b/lldb/packages/Python/lldbsuite/test/make/Makefile.rules
index 8521ca5..e72ffd1 100644
--- a/lldb/packages/Python/lldbsuite/test/make/Makefile.rules
+++ b/lldb/packages/Python/lldbsuite/test/make/Makefile.rules
@@ -149,6 +149,16 @@ else
endif
#----------------------------------------------------------------------
+# Use LLD when cross compiling on Darwin.
+#----------------------------------------------------------------------
+ifeq "$(HOST_OS)" "Darwin"
+ ifneq (,$(filter $(OS), Android FreeBSD Linux NetBSD Windows_NT))
+ LDFLAGS += -fuse-ld=lld
+ endif
+endif
+
+
+#----------------------------------------------------------------------
# ARCHFLAG is the flag used to tell the compiler which architecture
# to compile for. The default is the flag that clang accepts.
#----------------------------------------------------------------------
diff --git a/lldb/scripts/framework-header-fix.py b/lldb/scripts/framework-header-fix.py
index aa034db..36c5c67 100755
--- a/lldb/scripts/framework-header-fix.py
+++ b/lldb/scripts/framework-header-fix.py
@@ -112,7 +112,7 @@ def main():
# but passing them in with dashes for this script causes argparse to think that they're
# arguments in and of themselves, so they need to be passed in without dashes.
if args.unifdef_guards:
- unifdef_guards = ["-" + guard for guard in args.unifdef_guards]
+ unifdef_guards = ["-U" + guard for guard in args.unifdef_guards]
# Create the framework's header dir if it doesn't already exist
if not os.path.exists(os.path.dirname(output_file_path)):
diff --git a/lldb/source/Commands/CommandObjectDWIMPrint.cpp b/lldb/source/Commands/CommandObjectDWIMPrint.cpp
index a2c004d..d7589cc 100644
--- a/lldb/source/Commands/CommandObjectDWIMPrint.cpp
+++ b/lldb/source/Commands/CommandObjectDWIMPrint.cpp
@@ -18,11 +18,14 @@
#include "lldb/Interpreter/OptionGroupValueObjectDisplay.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Utility/ConstString.h"
+#include "lldb/Utility/LLDBLog.h"
+#include "lldb/Utility/Log.h"
#include "lldb/ValueObject/ValueObject.h"
#include "lldb/lldb-defines.h"
#include "lldb/lldb-enumerations.h"
#include "lldb/lldb-forward.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
#include <regex>
@@ -132,27 +135,22 @@ void CommandObjectDWIMPrint::DoExecute(StringRef command,
};
// Dump `valobj` according to whether `po` was requested or not.
- auto dump_val_object = [&](ValueObject &valobj) {
+ auto dump_val_object = [&](ValueObject &valobj) -> Error {
if (is_po) {
StreamString temp_result_stream;
- if (llvm::Error error = valobj.Dump(temp_result_stream, dump_options)) {
- result.AppendError(toString(std::move(error)));
- return;
- }
+ if (Error err = valobj.Dump(temp_result_stream, dump_options))
+ return err;
llvm::StringRef output = temp_result_stream.GetString();
maybe_add_hint(output);
result.GetOutputStream() << output;
} else {
- llvm::Error error =
- valobj.Dump(result.GetOutputStream(), dump_options);
- if (error) {
- result.AppendError(toString(std::move(error)));
- return;
- }
+ if (Error err = valobj.Dump(result.GetOutputStream(), dump_options))
+ return err;
}
m_interpreter.PrintWarningsIfNecessary(result.GetOutputStream(),
m_cmd_name);
result.SetStatus(eReturnStatusSuccessFinishResult);
+ return Error::success();
};
// First, try `expr` as a _limited_ frame variable expression path: only the
@@ -186,8 +184,13 @@ void CommandObjectDWIMPrint::DoExecute(StringRef command,
expr);
}
- dump_val_object(*valobj_sp);
- return;
+ Error err = dump_val_object(*valobj_sp);
+ if (!err)
+ return;
+
+ // Dump failed, continue on to expression evaluation.
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Expressions), std::move(err),
+ "could not print frame variable '{1}': {0}", expr);
}
}
@@ -196,8 +199,14 @@ void CommandObjectDWIMPrint::DoExecute(StringRef command,
if (auto *state = target.GetPersistentExpressionStateForLanguage(language))
if (auto var_sp = state->GetVariable(expr))
if (auto valobj_sp = var_sp->GetValueObject()) {
- dump_val_object(*valobj_sp);
- return;
+ Error err = dump_val_object(*valobj_sp);
+ if (!err)
+ return;
+
+ // Dump failed, continue on to expression evaluation.
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Expressions), std::move(err),
+ "could not print persistent variable '{1}': {0}",
+ expr);
}
// Third, and lastly, try `expr` as a source expression to evaluate.
@@ -248,10 +257,12 @@ void CommandObjectDWIMPrint::DoExecute(StringRef command,
result.AppendNoteWithFormatv("ran `expression {0}{1}`", flags, expr);
}
- if (valobj_sp->GetError().GetError() != UserExpression::kNoResult)
- dump_val_object(*valobj_sp);
- else
+ if (valobj_sp->GetError().GetError() != UserExpression::kNoResult) {
+ if (Error err = dump_val_object(*valobj_sp))
+ result.SetError(std::move(err));
+ } else {
result.SetStatus(eReturnStatusSuccessFinishNoResult);
+ }
if (suppress_result)
if (auto result_var_sp =
diff --git a/lldb/source/Commands/CommandObjectMemory.cpp b/lldb/source/Commands/CommandObjectMemory.cpp
index 5792c13..af1ff3e 100644
--- a/lldb/source/Commands/CommandObjectMemory.cpp
+++ b/lldb/source/Commands/CommandObjectMemory.cpp
@@ -156,6 +156,7 @@ public:
case eFormatBinary:
case eFormatFloat:
+ case eFormatFloat128:
case eFormatOctal:
case eFormatDecimal:
case eFormatEnum:
@@ -1356,6 +1357,7 @@ protected:
switch (m_format_options.GetFormat()) {
case kNumFormats:
case eFormatFloat: // TODO: add support for floats soon
+ case eFormatFloat128:
case eFormatCharPrintable:
case eFormatBytesWithASCII:
case eFormatComplex:
diff --git a/lldb/source/Core/DumpDataExtractor.cpp b/lldb/source/Core/DumpDataExtractor.cpp
index 7214073..37dffc7 100644
--- a/lldb/source/Core/DumpDataExtractor.cpp
+++ b/lldb/source/Core/DumpDataExtractor.cpp
@@ -318,14 +318,15 @@ static void printMemoryTags(const DataExtractor &DE, Stream *s,
}
static const llvm::fltSemantics &GetFloatSemantics(const TargetSP &target_sp,
- size_t byte_size) {
+ size_t byte_size,
+ lldb::Format format) {
if (target_sp) {
auto type_system_or_err =
target_sp->GetScratchTypeSystemForLanguage(eLanguageTypeC);
if (!type_system_or_err)
llvm::consumeError(type_system_or_err.takeError());
else if (auto ts = *type_system_or_err)
- return ts->GetFloatTypeSemantics(byte_size);
+ return ts->GetFloatTypeSemantics(byte_size, format);
}
// No target, just make a reasonable guess
switch(byte_size) {
@@ -335,7 +336,13 @@ static const llvm::fltSemantics &GetFloatSemantics(const TargetSP &target_sp,
return llvm::APFloat::IEEEsingle();
case 8:
return llvm::APFloat::IEEEdouble();
- }
+ case 16:
+ if (format == eFormatFloat128) {
+ return llvm::APFloat::IEEEquad();
+ }
+ // Otherwise it's ambiguous whether a 16-byte float is a float128 or a
+ // target-specific long double.
+ }
return llvm::APFloat::Bogus();
}
@@ -653,6 +660,7 @@ lldb::offset_t lldb_private::DumpDataExtractor(
}
} break;
+ case eFormatFloat128:
case eFormatFloat: {
TargetSP target_sp;
if (exe_scope)
@@ -666,7 +674,7 @@ lldb::offset_t lldb_private::DumpDataExtractor(
const unsigned format_precision = 0;
const llvm::fltSemantics &semantics =
- GetFloatSemantics(target_sp, item_byte_size);
+ GetFloatSemantics(target_sp, item_byte_size, item_format);
// Recalculate the byte size in case of a difference. This is possible
// when item_byte_size is 16 (128-bit), because you could get back the
diff --git a/lldb/source/Core/Module.cpp b/lldb/source/Core/Module.cpp
index 90997da..f27a95d 100644
--- a/lldb/source/Core/Module.cpp
+++ b/lldb/source/Core/Module.cpp
@@ -130,8 +130,10 @@ Module *Module::GetAllocatedModuleAtIndex(size_t idx) {
return nullptr;
}
+static std::atomic<lldb::user_id_t> g_unique_id = 1;
+
Module::Module(const ModuleSpec &module_spec)
- : m_unwind_table(*this), m_file_has_changed(false),
+ : UserID(g_unique_id++), m_unwind_table(*this), m_file_has_changed(false),
m_first_file_changed_log(false) {
// Scope for locker below...
{
@@ -236,7 +238,8 @@ Module::Module(const ModuleSpec &module_spec)
Module::Module(const FileSpec &file_spec, const ArchSpec &arch,
ConstString object_name, lldb::offset_t object_offset,
const llvm::sys::TimePoint<> &object_mod_time)
- : m_mod_time(FileSystem::Instance().GetModificationTime(file_spec)),
+ : UserID(g_unique_id++),
+ m_mod_time(FileSystem::Instance().GetModificationTime(file_spec)),
m_arch(arch), m_file(file_spec), m_object_name(object_name),
m_object_offset(object_offset), m_object_mod_time(object_mod_time),
m_unwind_table(*this), m_file_has_changed(false),
@@ -257,7 +260,7 @@ Module::Module(const FileSpec &file_spec, const ArchSpec &arch,
}
Module::Module()
- : m_unwind_table(*this), m_file_has_changed(false),
+ : UserID(g_unique_id++), m_unwind_table(*this), m_file_has_changed(false),
m_first_file_changed_log(false) {
std::lock_guard<std::recursive_mutex> guard(
GetAllocationModuleCollectionMutex());
diff --git a/lldb/source/Core/ModuleList.cpp b/lldb/source/Core/ModuleList.cpp
index d2e5be8..01f46b6 100644
--- a/lldb/source/Core/ModuleList.cpp
+++ b/lldb/source/Core/ModuleList.cpp
@@ -584,6 +584,20 @@ ModuleSP ModuleList::FindModule(const UUID &uuid) const {
return module_sp;
}
+ModuleSP ModuleList::FindModule(lldb::user_id_t uid) const {
+ ModuleSP module_sp;
+ ForEach([&](const ModuleSP &m) {
+ if (m->GetID() == uid) {
+ module_sp = m;
+ return IterationAction::Stop;
+ }
+
+ return IterationAction::Continue;
+ });
+
+ return module_sp;
+}
+
void ModuleList::FindTypes(Module *search_first, const TypeQuery &query,
TypeResults &results) const {
std::lock_guard<std::recursive_mutex> guard(m_modules_mutex);
diff --git a/lldb/source/DataFormatters/FormatManager.cpp b/lldb/source/DataFormatters/FormatManager.cpp
index 7862fb8..8595f81 100644
--- a/lldb/source/DataFormatters/FormatManager.cpp
+++ b/lldb/source/DataFormatters/FormatManager.cpp
@@ -72,6 +72,7 @@ static constexpr FormatInfo g_format_infos[] = {
{eFormatInstruction, 'i', "instruction"},
{eFormatVoid, 'v', "void"},
{eFormatUnicode8, 'u', "unicode8"},
+ {eFormatFloat128, '\0', "float128"},
};
static_assert((sizeof(g_format_infos) / sizeof(g_format_infos[0])) ==
diff --git a/lldb/source/DataFormatters/VectorType.cpp b/lldb/source/DataFormatters/VectorType.cpp
index 8a842b8..c2355fb 100644
--- a/lldb/source/DataFormatters/VectorType.cpp
+++ b/lldb/source/DataFormatters/VectorType.cpp
@@ -55,6 +55,8 @@ static CompilerType GetCompilerTypeForFormat(lldb::Format format,
case lldb::eFormatFloat:
return type_system->GetBasicTypeFromAST(lldb::eBasicTypeFloat);
+ case lldb::eFormatFloat128:
+ return type_system->GetBasicTypeFromAST(lldb::eBasicTypeFloat128);
case lldb::eFormatHex:
case lldb::eFormatHexUppercase:
diff --git a/lldb/source/Expression/Expression.cpp b/lldb/source/Expression/Expression.cpp
index 93f585e..796851f 100644
--- a/lldb/source/Expression/Expression.cpp
+++ b/lldb/source/Expression/Expression.cpp
@@ -10,6 +10,11 @@
#include "lldb/Target/ExecutionContextScope.h"
#include "lldb/Target/Target.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
using namespace lldb_private;
Expression::Expression(Target &target)
@@ -26,3 +31,47 @@ Expression::Expression(ExecutionContextScope &exe_scope)
m_jit_end_addr(LLDB_INVALID_ADDRESS) {
assert(m_target_wp.lock());
}
+
+llvm::Expected<FunctionCallLabel>
+lldb_private::FunctionCallLabel::fromString(llvm::StringRef label) {
+ llvm::SmallVector<llvm::StringRef, 4> components;
+ label.split(components, ":", /*MaxSplit=*/3);
+
+ if (components.size() != 4)
+ return llvm::createStringError("malformed function call label.");
+
+ if (components[0] != FunctionCallLabelPrefix)
+ return llvm::createStringError(llvm::formatv(
+ "expected function call label prefix '{0}' but found '{1}' instead.",
+ FunctionCallLabelPrefix, components[0]));
+
+ llvm::StringRef module_label = components[1];
+ llvm::StringRef die_label = components[2];
+
+ lldb::user_id_t module_id = 0;
+ if (!llvm::to_integer(module_label, module_id))
+ return llvm::createStringError(
+ llvm::formatv("failed to parse module ID from '{0}'.", module_label));
+
+ lldb::user_id_t die_id;
+ if (!llvm::to_integer(die_label, die_id))
+ return llvm::createStringError(
+ llvm::formatv("failed to parse symbol ID from '{0}'.", die_label));
+
+ return FunctionCallLabel{/*.module_id=*/module_id,
+ /*.symbol_id=*/die_id,
+ /*.lookup_name=*/components[3]};
+}
+
+std::string lldb_private::FunctionCallLabel::toString() const {
+ return llvm::formatv("{0}:{1:x}:{2:x}:{3}", FunctionCallLabelPrefix,
+ module_id, symbol_id, lookup_name)
+ .str();
+}
+
+void llvm::format_provider<FunctionCallLabel>::format(
+ const FunctionCallLabel &label, raw_ostream &OS, StringRef Style) {
+ OS << llvm::formatv("FunctionCallLabel{ module_id: {0:x}, symbol_id: {1:x}, "
+ "lookup_name: {2} }",
+ label.module_id, label.symbol_id, label.lookup_name);
+}
diff --git a/lldb/source/Expression/IRExecutionUnit.cpp b/lldb/source/Expression/IRExecutionUnit.cpp
index 6f812b9..5e40df2 100644
--- a/lldb/source/Expression/IRExecutionUnit.cpp
+++ b/lldb/source/Expression/IRExecutionUnit.cpp
@@ -13,6 +13,7 @@
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
@@ -20,6 +21,7 @@
#include "lldb/Core/Disassembler.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/Section.h"
+#include "lldb/Expression/Expression.h"
#include "lldb/Expression/IRExecutionUnit.h"
#include "lldb/Expression/ObjectFileJIT.h"
#include "lldb/Host/HostInfo.h"
@@ -36,6 +38,7 @@
#include "lldb/Utility/LLDBAssert.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
+#include "lldb/lldb-defines.h"
#include <optional>
@@ -771,6 +774,40 @@ private:
lldb::addr_t m_best_internal_load_address = LLDB_INVALID_ADDRESS;
};
+/// Returns the address of the function referred to by the special function
+/// call label \c label.
+static llvm::Expected<lldb::addr_t>
+ResolveFunctionCallLabel(const FunctionCallLabel &label,
+ const lldb_private::SymbolContext &sc,
+ bool &symbol_was_missing_weak) {
+ symbol_was_missing_weak = false;
+
+ if (!sc.target_sp)
+ return llvm::createStringError("target not available.");
+
+ auto module_sp = sc.target_sp->GetImages().FindModule(label.module_id);
+ if (!module_sp)
+ return llvm::createStringError(
+ llvm::formatv("failed to find module by UID {0}", label.module_id));
+
+ auto *symbol_file = module_sp->GetSymbolFile();
+ if (!symbol_file)
+ return llvm::createStringError(
+ llvm::formatv("no SymbolFile found on module {0:x}.", module_sp.get()));
+
+ auto sc_or_err = symbol_file->ResolveFunctionCallLabel(label);
+ if (!sc_or_err)
+ return llvm::joinErrors(
+ llvm::createStringError("failed to resolve function by UID"),
+ sc_or_err.takeError());
+
+ SymbolContextList sc_list;
+ sc_list.Append(*sc_or_err);
+
+ LoadAddressResolver resolver(*sc.target_sp, symbol_was_missing_weak);
+ return resolver.Resolve(sc_list).value_or(LLDB_INVALID_ADDRESS);
+}
+
lldb::addr_t
IRExecutionUnit::FindInSymbols(const std::vector<ConstString> &names,
const lldb_private::SymbolContext &sc,
@@ -906,6 +943,34 @@ lldb::addr_t IRExecutionUnit::FindInUserDefinedSymbols(
lldb::addr_t IRExecutionUnit::FindSymbol(lldb_private::ConstString name,
bool &missing_weak) {
+ if (name.GetStringRef().starts_with(FunctionCallLabelPrefix)) {
+ auto label_or_err = FunctionCallLabel::fromString(name);
+ if (!label_or_err) {
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Expressions), label_or_err.takeError(),
+ "failed to create FunctionCallLabel from '{1}': {0}",
+ name.GetStringRef());
+ return LLDB_INVALID_ADDRESS;
+ }
+
+ if (auto addr_or_err =
+ ResolveFunctionCallLabel(*label_or_err, m_sym_ctx, missing_weak)) {
+ return *addr_or_err;
+ } else {
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Expressions), addr_or_err.takeError(),
+ "Failed to resolve function call label '{1}': {0}",
+ name.GetStringRef());
+
+ // Fall back to lookup by name despite error in resolving the label.
+ // May happen in practice if the definition of a function lives in
+ // a different lldb_private::Module than it's declaration. Meaning
+ // we couldn't pin-point it using the information encoded in the label.
+ name.SetString(label_or_err->lookup_name);
+ }
+ }
+
+ // TODO: now with function call labels, do we still need to
+ // generate alternate manglings?
+
std::vector<ConstString> candidate_C_names;
std::vector<ConstString> candidate_CPlusPlus_names;
diff --git a/lldb/source/Expression/IRInterpreter.cpp b/lldb/source/Expression/IRInterpreter.cpp
index fa74e88..9140483 100644
--- a/lldb/source/Expression/IRInterpreter.cpp
+++ b/lldb/source/Expression/IRInterpreter.cpp
@@ -259,7 +259,9 @@ public:
break;
case Value::FunctionVal:
if (const Function *constant_func = dyn_cast<Function>(constant)) {
- lldb_private::ConstString name(constant_func->getName());
+ lldb_private::ConstString name(
+ llvm::GlobalValue::dropLLVMManglingEscape(
+ constant_func->getName()));
bool missing_weak = false;
lldb::addr_t addr = m_execution_unit.FindSymbol(name, missing_weak);
if (addr == LLDB_INVALID_ADDRESS)
diff --git a/lldb/source/Expression/Materializer.cpp b/lldb/source/Expression/Materializer.cpp
index 17ea159..329768d 100644
--- a/lldb/source/Expression/Materializer.cpp
+++ b/lldb/source/Expression/Materializer.cpp
@@ -102,22 +102,23 @@ public:
m_persistent_variable_sp->GetName(), mem, eAddressTypeLoad,
map.GetAddressByteSize());
- if (m_persistent_variable_sp->m_flags &
- ExpressionVariable::EVKeepInTarget) {
- if (used_policy == IRMemoryMap::eAllocationPolicyMirror) {
+ if (used_policy == IRMemoryMap::eAllocationPolicyMirror) {
+ if (m_persistent_variable_sp->m_flags &
+ ExpressionVariable::EVKeepInTarget) {
// Clear the flag if the variable will never be deallocated.
Status leak_error;
map.Leak(mem, leak_error);
m_persistent_variable_sp->m_flags &=
~ExpressionVariable::EVNeedsAllocation;
- } else {
- // If the variable cannot be kept in target, clear this flag...
- m_persistent_variable_sp->m_flags &=
- ~ExpressionVariable::EVKeepInTarget;
- // ...and set the flag to copy the value during dematerialization.
- m_persistent_variable_sp->m_flags |=
- ExpressionVariable::EVNeedsFreezeDry;
}
+ } else {
+ // If we cannot allocate memory in the process,
+ // - clear the 'EVKeepInTarget' flag to ensure that 'm_live_sp' is reset
+ // during dematerialization,
+ m_persistent_variable_sp->m_flags &= ~ExpressionVariable::EVKeepInTarget;
+ // - set the 'EVNeedsFreezeDry' flag so that the value is copied to
+ // 'm_frozen_sp' during dematerialization.
+ m_persistent_variable_sp->m_flags |= ExpressionVariable::EVNeedsFreezeDry;
}
// Write the contents of the variable to the area.
diff --git a/lldb/source/Host/common/Host.cpp b/lldb/source/Host/common/Host.cpp
index 5992b54..510f9c7 100644
--- a/lldb/source/Host/common/Host.cpp
+++ b/lldb/source/Host/common/Host.cpp
@@ -82,10 +82,11 @@ int __pthread_fchdir(int fildes);
using namespace lldb;
using namespace lldb_private;
-#if !defined(__APPLE__)
-// The system log is currently only meaningful on Darwin, where this means
-// os_log. The meaning of a "system log" isn't as clear on other platforms, and
-// therefore we don't providate a default implementation. Vendors are free to
+#if !defined(__APPLE__) && !defined(_WIN32)
+// The system log is currently only meaningful on Darwin and Windows.
+// On Darwin, this means os_log. On Windows, this means the event log.
+// The meaning of a "system log" isn't as clear on other platforms, and
+// therefore we don't provide a default implementation. Vendors are free
// to implement this function if they have a use for it.
void Host::SystemLog(Severity severity, llvm::StringRef message) {}
#endif
diff --git a/lldb/source/Host/windows/Host.cpp b/lldb/source/Host/windows/Host.cpp
index a7369e7..4e747f7 100644
--- a/lldb/source/Host/windows/Host.cpp
+++ b/lldb/source/Host/windows/Host.cpp
@@ -22,7 +22,9 @@
#include "lldb/Utility/StreamString.h"
#include "lldb/Utility/StructuredData.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ManagedStatic.h"
// Windows includes
#include <tlhelp32.h>
@@ -302,3 +304,64 @@ Environment Host::GetEnvironment() {
}
return env;
}
+
+/// Manages the lifecycle of a Windows Event's Source.
+/// The destructor will call DeregisterEventSource.
+/// This class is meant to be used with \ref llvm::ManagedStatic.
+class WindowsEventLog {
+public:
+ WindowsEventLog() : handle(RegisterEventSource(nullptr, L"lldb")) {}
+
+ ~WindowsEventLog() {
+ if (handle)
+ DeregisterEventSource(handle);
+ }
+
+ HANDLE GetHandle() const { return handle; }
+
+private:
+ HANDLE handle;
+};
+
+static llvm::ManagedStatic<WindowsEventLog> event_log;
+
+static std::wstring AnsiToUtf16(const std::string &ansi) {
+ if (ansi.empty())
+ return {};
+
+ const int unicode_length =
+ MultiByteToWideChar(CP_ACP, 0, ansi.c_str(), -1, nullptr, 0);
+ if (unicode_length == 0)
+ return {};
+
+ std::wstring unicode(unicode_length, L'\0');
+ MultiByteToWideChar(CP_ACP, 0, ansi.c_str(), -1, &unicode[0], unicode_length);
+ return unicode;
+}
+
+void Host::SystemLog(Severity severity, llvm::StringRef message) {
+ HANDLE h = event_log->GetHandle();
+ if (!h)
+ return;
+
+ std::wstring wide_message = AnsiToUtf16(message.str());
+ if (wide_message.empty())
+ return;
+
+ LPCWSTR msg_ptr = wide_message.c_str();
+
+ WORD event_type;
+ switch (severity) {
+ case lldb::eSeverityWarning:
+ event_type = EVENTLOG_WARNING_TYPE;
+ break;
+ case lldb::eSeverityError:
+ event_type = EVENTLOG_ERROR_TYPE;
+ break;
+ case lldb::eSeverityInfo:
+ default:
+ event_type = EVENTLOG_INFORMATION_TYPE;
+ }
+
+ ReportEventW(h, event_type, 0, 0, nullptr, 1, 0, &msg_ptr, nullptr);
+}
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
index 9f77fbc..214e260 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
@@ -1991,7 +1991,7 @@ void ClangExpressionDeclMap::AddContextClassType(NameSearchContext &context,
const bool is_artificial = false;
CXXMethodDecl *method_decl = m_clang_ast_context->AddMethodToCXXRecordType(
- copied_clang_type.GetOpaqueQualType(), "$__lldb_expr", nullptr,
+ copied_clang_type.GetOpaqueQualType(), "$__lldb_expr", /*asm_label=*/{},
method_type, lldb::eAccessPublic, is_virtual, is_static, is_inline,
is_explicit, is_attr_used, is_artificial);
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
index 3995bc0..e5a1d2d 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionParser.cpp
@@ -894,7 +894,7 @@ ClangExpressionParser::ClangExpressionParser(
m_llvm_context = std::make_unique<LLVMContext>();
m_code_generator.reset(CreateLLVMCodeGen(
m_compiler->getDiagnostics(), module_name,
- &m_compiler->getVirtualFileSystem(), m_compiler->getHeaderSearchOpts(),
+ m_compiler->getVirtualFileSystemPtr(), m_compiler->getHeaderSearchOpts(),
m_compiler->getPreprocessorOpts(), m_compiler->getCodeGenOpts(),
*m_llvm_context));
}
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp
index 06f3a7e..ff9ed9c 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionSourceCode.cpp
@@ -260,9 +260,8 @@ TokenVerifier::TokenVerifier(std::string body) {
// Let's build the actual source code Clang needs and setup some utility
// objects.
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> diag_ids(new DiagnosticIDs());
DiagnosticOptions diags_opts;
- DiagnosticsEngine diags(diag_ids, diags_opts);
+ DiagnosticsEngine diags(DiagnosticIDs::create(), diags_opts);
clang::SourceManager SM(diags, file_mgr);
auto buf = llvm::MemoryBuffer::getMemBuffer(body);
diff --git a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp
index 2f838b3..d54f072 100644
--- a/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp
+++ b/lldb/source/Plugins/ExpressionParser/Clang/ClangModulesDeclVendor.cpp
@@ -747,7 +747,7 @@ ClangModulesDeclVendor::Create(Target &target) {
// Make sure clang uses the same VFS as LLDB.
instance->createFileManager(FileSystem::Instance().GetVirtualFileSystem());
- instance->setDiagnostics(diagnostics_engine.get());
+ instance->setDiagnostics(diagnostics_engine);
std::unique_ptr<clang::FrontendAction> action(new clang::SyntaxOnlyAction);
diff --git a/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp b/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp
index 2adde02..5e429a9 100644
--- a/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp
+++ b/lldb/source/Plugins/Instruction/RISCV/EmulateInstructionRISCV.cpp
@@ -1807,7 +1807,7 @@ RISCVSingleStepBreakpointLocationsPredictor::GetBreakpointLocations(
Log *log = GetLog(LLDBLog::Unwind);
LLDB_LOGF(log,
"RISCVSingleStepBreakpointLocationsPredictor::%s: can't find "
- "corresponding load reserve insturuction",
+ "corresponding load reserve instruction",
__FUNCTION__);
return {*pc + (inst->is_rvc ? 2u : 4u)};
}
@@ -1839,7 +1839,7 @@ RISCVSingleStepBreakpointLocationsPredictor::HandleAtomicSequence(
EmulateInstructionRISCV *riscv_emulator =
static_cast<EmulateInstructionRISCV *>(m_emulator_up.get());
- // Handle instructions between LR and SC. According to unprivilleged
+ // Handle instructions between LR and SC. According to unprivileged
// RISC-V ISA there can be at most 16 instructions in the sequence.
lldb::addr_t entry_pc = pc; // LR instruction address
@@ -1872,7 +1872,7 @@ RISCVSingleStepBreakpointLocationsPredictor::HandleAtomicSequence(
Log *log = GetLog(LLDBLog::Unwind);
LLDB_LOGF(log,
"RISCVSingleStepBreakpointLocationsPredictor::%s: can't find "
- "corresponding store conditional insturuction",
+ "corresponding store conditional instruction",
__FUNCTION__);
return {entry_pc + (lr_inst->is_rvc ? 2u : 4u)};
}
diff --git a/lldb/source/Plugins/Language/ClangCommon/ClangHighlighter.cpp b/lldb/source/Plugins/Language/ClangCommon/ClangHighlighter.cpp
index 8cc5714..9cb5ea3 100644
--- a/lldb/source/Plugins/Language/ClangCommon/ClangHighlighter.cpp
+++ b/lldb/source/Plugins/Language/ClangCommon/ClangHighlighter.cpp
@@ -162,9 +162,8 @@ void ClangHighlighter::Highlight(const HighlightStyle &options,
// Let's build the actual source code Clang needs and setup some utility
// objects.
std::string full_source = previous_lines.str() + line.str();
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> diag_ids(new DiagnosticIDs());
DiagnosticOptions diags_opts;
- DiagnosticsEngine diags(diag_ids, diags_opts);
+ DiagnosticsEngine diags(DiagnosticIDs::create(), diags_opts);
clang::SourceManager SM(diags, file_mgr);
auto buf = llvm::MemoryBuffer::getMemBuffer(full_source);
diff --git a/lldb/source/Plugins/Process/wasm/CMakeLists.txt b/lldb/source/Plugins/Process/wasm/CMakeLists.txt
index ff8a3c7..779b97e 100644
--- a/lldb/source/Plugins/Process/wasm/CMakeLists.txt
+++ b/lldb/source/Plugins/Process/wasm/CMakeLists.txt
@@ -1,5 +1,6 @@
add_lldb_library(lldbPluginProcessWasm PLUGIN
ProcessWasm.cpp
+ RegisterContextWasm.cpp
ThreadWasm.cpp
UnwindWasm.cpp
diff --git a/lldb/source/Plugins/Process/wasm/ProcessWasm.cpp b/lldb/source/Plugins/Process/wasm/ProcessWasm.cpp
index 5eeabec..580e8c1 100644
--- a/lldb/source/Plugins/Process/wasm/ProcessWasm.cpp
+++ b/lldb/source/Plugins/Process/wasm/ProcessWasm.cpp
@@ -131,3 +131,36 @@ ProcessWasm::GetWasmCallStack(lldb::tid_t tid) {
return call_stack_pcs;
}
+
+llvm::Expected<lldb::DataBufferSP>
+ProcessWasm::GetWasmVariable(WasmVirtualRegisterKinds kind, int frame_index,
+ int index) {
+ StreamString packet;
+ switch (kind) {
+ case eWasmTagLocal:
+ packet.Printf("qWasmLocal:");
+ break;
+ case eWasmTagGlobal:
+ packet.Printf("qWasmGlobal:");
+ break;
+ case eWasmTagOperandStack:
+ packet.PutCString("qWasmStackValue:");
+ break;
+ case eWasmTagNotAWasmLocation:
+ return llvm::createStringError("not a Wasm location");
+ }
+ packet.Printf("%d;%d", frame_index, index);
+
+ StringExtractorGDBRemote response;
+ if (m_gdb_comm.SendPacketAndWaitForResponse(packet.GetString(), response) !=
+ GDBRemoteCommunication::PacketResult::Success)
+ return llvm::createStringError("failed to send Wasm variable");
+
+ if (!response.IsNormalResponse())
+ return llvm::createStringError("failed to get response for Wasm variable");
+
+ WritableDataBufferSP buffer_sp(
+ new DataBufferHeap(response.GetStringRef().size() / 2, 0));
+ response.GetHexBytes(buffer_sp->GetData(), '\xcc');
+ return buffer_sp;
+}
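
The packet text GetWasmVariable builds follows the usual GDB-remote convention of an ASCII command plus decimal arguments. For frame 2, variable 5, the three kinds produce (a sketch; the qWasm* extensions are implemented by the Wasm engine's GDB stub, not by stock gdbserver):

  qWasmLocal:2;5        read local #5 of frame 2
  qWasmGlobal:2;5       read module global #5
  qWasmStackValue:2;5   read operand-stack slot #5

The reply is hex-encoded raw bytes: GetHexBytes above decodes the 2*N response characters into an N-byte buffer, with 0xcc as the fill value for bytes that fail to decode.
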
diff --git a/lldb/source/Plugins/Process/wasm/ProcessWasm.h b/lldb/source/Plugins/Process/wasm/ProcessWasm.h
index bab14a8..22effe7 100644
--- a/lldb/source/Plugins/Process/wasm/ProcessWasm.h
+++ b/lldb/source/Plugins/Process/wasm/ProcessWasm.h
@@ -10,6 +10,7 @@
#define LLDB_SOURCE_PLUGINS_PROCESS_WASM_PROCESSWASM_H
#include "Plugins/Process/gdb-remote/ProcessGDBRemote.h"
+#include "Utility/WasmVirtualRegisters.h"
namespace lldb_private {
namespace wasm {
@@ -71,12 +72,19 @@ public:
/// Retrieve the current call stack from the WebAssembly remote process.
llvm::Expected<std::vector<lldb::addr_t>> GetWasmCallStack(lldb::tid_t tid);
+ /// Query the value of a WebAssembly variable from the WebAssembly
+ /// remote process.
+ llvm::Expected<lldb::DataBufferSP>
+ GetWasmVariable(WasmVirtualRegisterKinds kind, int frame_index, int index);
+
protected:
std::shared_ptr<process_gdb_remote::ThreadGDBRemote>
CreateThread(lldb::tid_t tid) override;
private:
friend class UnwindWasm;
+ friend class ThreadWasm;
+
process_gdb_remote::GDBRemoteDynamicRegisterInfoSP &GetRegisterInfo() {
return m_register_info_sp;
}
diff --git a/lldb/source/Plugins/Process/wasm/RegisterContextWasm.cpp b/lldb/source/Plugins/Process/wasm/RegisterContextWasm.cpp
new file mode 100644
index 0000000..b468171
--- /dev/null
+++ b/lldb/source/Plugins/Process/wasm/RegisterContextWasm.cpp
@@ -0,0 +1,109 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RegisterContextWasm.h"
+#include "Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h"
+#include "ProcessWasm.h"
+#include "ThreadWasm.h"
+#include "lldb/Utility/LLDBLog.h"
+#include "lldb/Utility/Log.h"
+#include "lldb/Utility/RegisterValue.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+
+using namespace lldb;
+using namespace lldb_private;
+using namespace lldb_private::process_gdb_remote;
+using namespace lldb_private::wasm;
+
+RegisterContextWasm::RegisterContextWasm(
+ wasm::ThreadWasm &thread, uint32_t concrete_frame_idx,
+ GDBRemoteDynamicRegisterInfoSP reg_info_sp)
+ : GDBRemoteRegisterContext(thread, concrete_frame_idx, reg_info_sp, false,
+ false) {}
+
+RegisterContextWasm::~RegisterContextWasm() = default;
+
+uint32_t RegisterContextWasm::ConvertRegisterKindToRegisterNumber(
+ lldb::RegisterKind kind, uint32_t num) {
+ return num;
+}
+
+size_t RegisterContextWasm::GetRegisterCount() {
+ // Wasm has no registers.
+ return 0;
+}
+
+const RegisterInfo *RegisterContextWasm::GetRegisterInfoAtIndex(size_t reg) {
+ uint32_t tag = GetWasmVirtualRegisterTag(reg);
+ if (tag == eWasmTagNotAWasmLocation)
+ return m_reg_info_sp->GetRegisterInfoAtIndex(
+ GetWasmVirtualRegisterIndex(reg));
+
+ auto it = m_register_map.find(reg);
+ if (it == m_register_map.end()) {
+ WasmVirtualRegisterKinds kind = static_cast<WasmVirtualRegisterKinds>(tag);
+ std::tie(it, std::ignore) = m_register_map.insert(
+ {reg, std::make_unique<WasmVirtualRegisterInfo>(
+ kind, GetWasmVirtualRegisterIndex(reg))});
+ }
+ return it->second.get();
+}
+
+size_t RegisterContextWasm::GetRegisterSetCount() { return 0; }
+
+const RegisterSet *RegisterContextWasm::GetRegisterSet(size_t reg_set) {
+ // Wasm has no registers.
+ return nullptr;
+}
+
+bool RegisterContextWasm::ReadRegister(const RegisterInfo *reg_info,
+ RegisterValue &value) {
+ // The only real registers is the PC.
+ if (reg_info->name)
+ return GDBRemoteRegisterContext::ReadRegister(reg_info, value);
+
+ // Read the virtual registers.
+ ThreadWasm *thread = static_cast<ThreadWasm *>(&GetThread());
+ ProcessWasm *process = static_cast<ProcessWasm *>(thread->GetProcess().get());
+  if (!process)
+ return false;
+
+ uint32_t frame_index = m_concrete_frame_idx;
+ WasmVirtualRegisterInfo *wasm_reg_info =
+ static_cast<WasmVirtualRegisterInfo *>(
+ const_cast<RegisterInfo *>(reg_info));
+
+ llvm::Expected<DataBufferSP> maybe_buffer = process->GetWasmVariable(
+ wasm_reg_info->kind, frame_index, wasm_reg_info->index);
+ if (!maybe_buffer) {
+ LLDB_LOG_ERROR(GetLog(LLDBLog::Process), maybe_buffer.takeError(),
+ "Failed to read Wasm local: {0}");
+ return false;
+ }
+
+ DataBufferSP buffer_sp = *maybe_buffer;
+ DataExtractor reg_data(buffer_sp, process->GetByteOrder(),
+ process->GetAddressByteSize());
+ wasm_reg_info->byte_size = buffer_sp->GetByteSize();
+ wasm_reg_info->encoding = lldb::eEncodingUint;
+
+ Status error = value.SetValueFromData(
+ *reg_info, reg_data, reg_info->byte_offset, /*partial_data_ok=*/false);
+ return error.Success();
+}
+
+void RegisterContextWasm::InvalidateAllRegisters() {}
+
+bool RegisterContextWasm::WriteRegister(const RegisterInfo *reg_info,
+ const RegisterValue &value) {
+  // The only real register is the PC.
+ if (reg_info->name)
+ return GDBRemoteRegisterContext::WriteRegister(reg_info, value);
+ return false;
+}
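
The tag/index helpers used by GetRegisterInfoAtIndex live in Utility/WasmVirtualRegisters.h, which is not part of this diff. A plausible sketch of such a packing, purely for illustration — the real bit layout may differ:

  #include <cstddef>
  #include <cstdint>

  // Assumed packing: the top bits of the register number carry the kind tag
  // (local/global/operand-stack/not-a-wasm-location); the low bits carry the
  // per-kind index.
  constexpr unsigned kTagShift = 24;
  constexpr size_t kIndexMask = (size_t(1) << kTagShift) - 1;

  constexpr uint32_t GetTag(size_t reg) { return uint32_t(reg >> kTagShift); }
  constexpr uint32_t GetIndex(size_t reg) { return uint32_t(reg & kIndexMask); }

Under this assumption, (tag << kTagShift) | 3 would name variable #3 of the given kind, while plain register numbers whose tag decodes to eWasmTagNotAWasmLocation fall through to the ordinary GDB-remote register-info path, exactly as GetRegisterInfoAtIndex does above.
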
diff --git a/lldb/source/Plugins/Process/wasm/RegisterContextWasm.h b/lldb/source/Plugins/Process/wasm/RegisterContextWasm.h
new file mode 100644
index 0000000..7e63eb8
--- /dev/null
+++ b/lldb/source/Plugins/Process/wasm/RegisterContextWasm.h
@@ -0,0 +1,69 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLDB_SOURCE_PLUGINS_PROCESS_WASM_REGISTERCONTEXTWASM_H
+#define LLDB_SOURCE_PLUGINS_PROCESS_WASM_REGISTERCONTEXTWASM_H
+
+#include "Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h"
+#include "ThreadWasm.h"
+#include "Utility/WasmVirtualRegisters.h"
+#include "lldb/lldb-private-types.h"
+#include <unordered_map>
+
+namespace lldb_private {
+namespace wasm {
+
+class RegisterContextWasm;
+
+typedef std::shared_ptr<RegisterContextWasm> RegisterContextWasmSP;
+
+struct WasmVirtualRegisterInfo : public RegisterInfo {
+ WasmVirtualRegisterKinds kind;
+ uint32_t index;
+
+ WasmVirtualRegisterInfo(WasmVirtualRegisterKinds kind, uint32_t index)
+ : RegisterInfo(), kind(kind), index(index) {}
+};
+
+class RegisterContextWasm
+ : public process_gdb_remote::GDBRemoteRegisterContext {
+public:
+ RegisterContextWasm(
+ wasm::ThreadWasm &thread, uint32_t concrete_frame_idx,
+ process_gdb_remote::GDBRemoteDynamicRegisterInfoSP reg_info_sp);
+
+ ~RegisterContextWasm() override;
+
+ uint32_t ConvertRegisterKindToRegisterNumber(lldb::RegisterKind kind,
+ uint32_t num) override;
+
+ void InvalidateAllRegisters() override;
+
+ size_t GetRegisterCount() override;
+
+ const RegisterInfo *GetRegisterInfoAtIndex(size_t reg) override;
+
+ size_t GetRegisterSetCount() override;
+
+ const RegisterSet *GetRegisterSet(size_t reg_set) override;
+
+ bool ReadRegister(const RegisterInfo *reg_info,
+ RegisterValue &value) override;
+
+ bool WriteRegister(const RegisterInfo *reg_info,
+ const RegisterValue &value) override;
+
+private:
+ std::unordered_map<size_t, std::unique_ptr<WasmVirtualRegisterInfo>>
+ m_register_map;
+};
+
+} // namespace wasm
+} // namespace lldb_private
+
+#endif
diff --git a/lldb/source/Plugins/Process/wasm/ThreadWasm.cpp b/lldb/source/Plugins/Process/wasm/ThreadWasm.cpp
index a6553ff..0666b75 100644
--- a/lldb/source/Plugins/Process/wasm/ThreadWasm.cpp
+++ b/lldb/source/Plugins/Process/wasm/ThreadWasm.cpp
@@ -9,6 +9,7 @@
#include "ThreadWasm.h"
#include "ProcessWasm.h"
+#include "RegisterContextWasm.h"
#include "UnwindWasm.h"
#include "lldb/Target/Target.h"
@@ -32,3 +33,19 @@ llvm::Expected<std::vector<lldb::addr_t>> ThreadWasm::GetWasmCallStack() {
}
return llvm::createStringError("no process");
}
+
+lldb::RegisterContextSP
+ThreadWasm::CreateRegisterContextForFrame(StackFrame *frame) {
+ uint32_t concrete_frame_idx = 0;
+ ProcessSP process_sp(GetProcess());
+ ProcessWasm *wasm_process = static_cast<ProcessWasm *>(process_sp.get());
+
+ if (frame)
+ concrete_frame_idx = frame->GetConcreteFrameIndex();
+
+ if (concrete_frame_idx == 0)
+ return std::make_shared<RegisterContextWasm>(
+ *this, concrete_frame_idx, wasm_process->GetRegisterInfo());
+
+ return GetUnwinder().CreateRegisterContextForFrame(frame);
+}
diff --git a/lldb/source/Plugins/Process/wasm/ThreadWasm.h b/lldb/source/Plugins/Process/wasm/ThreadWasm.h
index 1c90f58..c2f5762 100644
--- a/lldb/source/Plugins/Process/wasm/ThreadWasm.h
+++ b/lldb/source/Plugins/Process/wasm/ThreadWasm.h
@@ -25,6 +25,9 @@ public:
/// Retrieve the current call stack from the WebAssembly remote process.
llvm::Expected<std::vector<lldb::addr_t>> GetWasmCallStack();
+ lldb::RegisterContextSP
+ CreateRegisterContextForFrame(StackFrame *frame) override;
+
protected:
Unwind &GetUnwinder() override;
diff --git a/lldb/source/Plugins/SymbolFile/CTF/SymbolFileCTF.cpp b/lldb/source/Plugins/SymbolFile/CTF/SymbolFileCTF.cpp
index 81c6731..591fded 100644
--- a/lldb/source/Plugins/SymbolFile/CTF/SymbolFileCTF.cpp
+++ b/lldb/source/Plugins/SymbolFile/CTF/SymbolFileCTF.cpp
@@ -738,9 +738,29 @@ size_t SymbolFileCTF::ParseTypes(CompileUnit &cu) {
LLDB_LOG(log, "Parsed {0} CTF types", m_ctf_types.size());
- for (lldb::user_id_t uid = 1; uid < type_uid; ++uid)
+ for (lldb::user_id_t uid = 1; uid < type_uid; ++uid) {
ResolveTypeUID(uid);
+ // Remove the CTF type because we don't need it anymore, except for record
+ // types which we may need to complete later.
+ auto ctf_type_it = m_ctf_types.find(uid);
+ if (ctf_type_it != m_ctf_types.end()) {
+ CTFType *ctf_type = ctf_type_it->second.get();
+ if (!llvm::isa<CTFRecord>(ctf_type))
+ m_ctf_types.erase(uid);
+ }
+ }
+
+#ifndef NDEBUG
+ // Verify that the only CTF types left at this point are record types.
+ for (auto &t : m_ctf_types) {
+ CTFType *ctf_type = t.second.get();
+ assert(ctf_type && "invalid type in m_ctf_types");
+    assert(llvm::isa<CTFRecord>(ctf_type) && "leaking non-record type");
+ }
+
+#endif
+
LLDB_LOG(log, "Created {0} CTF types", m_types.size());
return m_types.size();
@@ -994,6 +1014,8 @@ lldb_private::Type *SymbolFileCTF::ResolveTypeUID(lldb::user_id_t type_uid) {
CTFType *ctf_type = ctf_type_it->second.get();
assert(ctf_type && "m_ctf_types should only contain valid CTF types");
+ assert(ctf_type->uid == type_uid &&
+ "CTF type UID doesn't match UID in m_ctf_types");
Log *log = GetLog(LLDBLog::Symbols);
@@ -1015,11 +1037,6 @@ lldb_private::Type *SymbolFileCTF::ResolveTypeUID(lldb::user_id_t type_uid) {
m_types[type_uid] = type_sp;
- // Except for record types which we'll need to complete later, we don't need
- // the CTF type anymore.
- if (!isa<CTFRecord>(ctf_type))
- m_ctf_types.erase(type_uid);
-
return type_sp.get();
}
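
The new logic in ParseTypes is a resolve-then-prune pass: every UID is materialized once, then everything except record types (which may still need lazy completion) is evicted. A minimal generic sketch of the same pattern, with illustrative names rather than LLDB API:

  #include <cassert>
  #include <cstdint>
  #include <map>
  #include <memory>

  struct Type { virtual ~Type() = default; };
  struct Record : Type {}; // stand-in for CTFRecord

  void PruneNonRecords(std::map<uint64_t, std::unique_ptr<Type>> &types) {
    for (auto it = types.begin(); it != types.end();) {
      if (dynamic_cast<Record *>(it->second.get()))
        ++it; // records stay resident for later completion
      else
        it = types.erase(it); // erase() hands back the next valid iterator
    }
  #ifndef NDEBUG
    for (auto &kv : types)
      assert(dynamic_cast<Record *>(kv.second.get()) && "leaked a non-record");
  #endif
  }
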
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.cpp
index 4bfbb4d..9762ead 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.cpp
@@ -13,6 +13,7 @@
#include "lldb/Core/Module.h"
#include "lldb/Symbol/Function.h"
+#include "lldb/lldb-private-enumerations.h"
#include "llvm/Support/DJB.h"
using namespace lldb;
@@ -275,7 +276,7 @@ void AppleDWARFIndex::GetNamespaces(
void AppleDWARFIndex::GetFunctions(
const Module::LookupInfo &lookup_info, SymbolFileDWARF &dwarf,
const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) {
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) {
if (!m_apple_names_up)
return;
@@ -288,15 +289,16 @@ void AppleDWARFIndex::GetFunctions(
ReportInvalidDIERef(die_ref, name);
continue;
}
- if (!ProcessFunctionDIE(lookup_info, die, parent_decl_ctx, callback))
+ if (ProcessFunctionDIE(lookup_info, die, parent_decl_ctx, callback) ==
+ IterationAction::Stop)
return;
}
}
void AppleDWARFIndex::GetFunctions(
const RegularExpression &regex,
- llvm::function_ref<bool(DWARFDIE die)> callback) {
- return GetGlobalVariables(regex, callback);
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) {
+ return GetGlobalVariables(regex, IterationActionAdaptor(callback));
}
void AppleDWARFIndex::Dump(Stream &s) {
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.h
index 73de75b..c0f0eb6 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/AppleDWARFIndex.h
@@ -61,12 +61,13 @@ public:
llvm::function_ref<bool(DWARFDIE die)> callback) override;
void GetNamespaces(ConstString name,
llvm::function_ref<bool(DWARFDIE die)> callback) override;
- void GetFunctions(const Module::LookupInfo &lookup_info,
- SymbolFileDWARF &dwarf,
- const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) override;
- void GetFunctions(const RegularExpression &regex,
- llvm::function_ref<bool(DWARFDIE die)> callback) override;
+ void GetFunctions(
+ const Module::LookupInfo &lookup_info, SymbolFileDWARF &dwarf,
+ const CompilerDeclContext &parent_decl_ctx,
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) override;
+ void GetFunctions(
+ const RegularExpression &regex,
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) override;
void Dump(Stream &s) override;
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
index ba65f50..781c1c6c 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFASTParserClang.cpp
@@ -24,6 +24,7 @@
#include "Plugins/Language/ObjC/ObjCLanguage.h"
#include "lldb/Core/Module.h"
#include "lldb/Core/Value.h"
+#include "lldb/Expression/Expression.h"
#include "lldb/Host/Host.h"
#include "lldb/Symbol/CompileUnit.h"
#include "lldb/Symbol/Function.h"
@@ -249,6 +250,47 @@ static unsigned GetCXXMethodCVQuals(const DWARFDIE &subprogram,
return cv_quals;
}
+static std::string MakeLLDBFuncAsmLabel(const DWARFDIE &die) {
+  char const *name = die.GetMangledName(/*substitute_name_allowed=*/false);
+ if (!name)
+ return {};
+
+ SymbolFileDWARF *dwarf = die.GetDWARF();
+ if (!dwarf)
+ return {};
+
+ auto get_module_id = [&](SymbolFile *sym) {
+ if (!sym)
+ return LLDB_INVALID_UID;
+
+ auto *obj = sym->GetMainObjectFile();
+ if (!obj)
+ return LLDB_INVALID_UID;
+
+ auto module_sp = obj->GetModule();
+ if (!module_sp)
+ return LLDB_INVALID_UID;
+
+ return module_sp->GetID();
+ };
+
+ lldb::user_id_t module_id = get_module_id(dwarf->GetDebugMapSymfile());
+ if (module_id == LLDB_INVALID_UID)
+ module_id = get_module_id(dwarf);
+
+ if (module_id == LLDB_INVALID_UID)
+ return {};
+
+ const auto die_id = die.GetID();
+ if (die_id == LLDB_INVALID_UID)
+ return {};
+
+ return FunctionCallLabel{/*module_id=*/module_id,
+ /*symbol_id=*/die_id,
+                           /*lookup_name=*/name}
+ .toString();
+}
+
TypeSP DWARFASTParserClang::ParseTypeFromClangModule(const SymbolContext &sc,
const DWARFDIE &die,
Log *log) {
@@ -1231,7 +1273,7 @@ std::pair<bool, TypeSP> DWARFASTParserClang::ParseCXXMethod(
clang::CXXMethodDecl *cxx_method_decl = m_ast.AddMethodToCXXRecordType(
class_opaque_type.GetOpaqueQualType(), attrs.name.GetCString(),
- attrs.mangled_name, clang_type, accessibility, attrs.is_virtual,
+ MakeLLDBFuncAsmLabel(die), clang_type, accessibility, attrs.is_virtual,
is_static, attrs.is_inline, attrs.is_explicit, is_attr_used,
attrs.is_artificial);
@@ -1384,7 +1426,7 @@ DWARFASTParserClang::ParseSubroutine(const DWARFDIE &die,
ignore_containing_context ? m_ast.GetTranslationUnitDecl()
: containing_decl_ctx,
GetOwningClangModule(die), name, clang_type, attrs.storage,
- attrs.is_inline);
+ attrs.is_inline, MakeLLDBFuncAsmLabel(die));
std::free(name_buf);
if (has_template_params) {
@@ -1394,7 +1436,7 @@ DWARFASTParserClang::ParseSubroutine(const DWARFDIE &die,
ignore_containing_context ? m_ast.GetTranslationUnitDecl()
: containing_decl_ctx,
GetOwningClangModule(die), attrs.name.GetStringRef(), clang_type,
- attrs.storage, attrs.is_inline);
+ attrs.storage, attrs.is_inline, /*asm_label=*/{});
clang::FunctionTemplateDecl *func_template_decl =
m_ast.CreateFunctionTemplateDecl(
containing_decl_ctx, GetOwningClangModule(die),
@@ -1406,20 +1448,6 @@ DWARFASTParserClang::ParseSubroutine(const DWARFDIE &die,
lldbassert(function_decl);
if (function_decl) {
- // Attach an asm(<mangled_name>) label to the FunctionDecl.
- // This ensures that clang::CodeGen emits function calls
- // using symbols that are mangled according to the DW_AT_linkage_name.
- // If we didn't do this, the external symbols wouldn't exactly
- // match the mangled name LLDB knows about and the IRExecutionUnit
- // would have to fall back to searching object files for
- // approximately matching function names. The motivating
- // example is generating calls to ABI-tagged template functions.
- // This is done separately for member functions in
- // AddMethodToCXXRecordType.
- if (attrs.mangled_name)
- function_decl->addAttr(clang::AsmLabelAttr::CreateImplicit(
- m_ast.getASTContext(), attrs.mangled_name, /*literal=*/false));
-
LinkDeclContextToDIE(function_decl, die);
const clang::FunctionProtoType *function_prototype(
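
MakeLLDBFuncAsmLabel packs three facts into the asm label — the owning module ID, the DIE's user ID, and the linkage name — so the label alone is enough to find the defining DIE later. FunctionCallLabel and its toString/fromString serialization come from lldb/Expression/Expression.h (included above); the serialized string format itself is opaque here. A hedged sketch of the intended round trip, using only the API visible in this patch:

  // Encode: what MakeLLDBFuncAsmLabel does with the IDs it collects.
  std::string MakeLabel(lldb::user_id_t module_id, lldb::user_id_t die_id,
                        const char *mangled_name) {
    return FunctionCallLabel{/*module_id=*/module_id,
                             /*symbol_id=*/die_id,
                             /*lookup_name=*/mangled_name}
        .toString();
  }

  // Decode: what the TypeSystemClang side does when CodeGen hands the
  // label back (see ExtractMangledNameFromFunctionCallLabel below).
  llvm::Expected<FunctionCallLabel> parsed =
      FunctionCallLabel::fromString(encoded);
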
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp
index 30c890d..a806506 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.cpp
@@ -16,6 +16,7 @@
#include "lldb/Core/Mangled.h"
#include "lldb/Core/Module.h"
#include "lldb/Target/Language.h"
+#include "lldb/lldb-private-enumerations.h"
using namespace lldb_private;
using namespace lldb;
@@ -23,10 +24,10 @@ using namespace lldb_private::plugin::dwarf;
DWARFIndex::~DWARFIndex() = default;
-bool DWARFIndex::ProcessFunctionDIE(
+IterationAction DWARFIndex::ProcessFunctionDIE(
const Module::LookupInfo &lookup_info, DWARFDIE die,
const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) {
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) {
llvm::StringRef name = lookup_info.GetLookupName().GetStringRef();
FunctionNameType name_type_mask = lookup_info.GetNameTypeMask();
@@ -43,7 +44,7 @@ bool DWARFIndex::ProcessFunctionDIE(
if (!lookup_info.NameMatchesLookupInfo(name_to_match_against,
lookup_info.GetLanguageType()))
- return true;
+ return IterationAction::Continue;
}
// Exit early if we're searching exclusively for methods or selectors and
@@ -51,12 +52,12 @@ bool DWARFIndex::ProcessFunctionDIE(
uint32_t looking_for_nonmethods =
name_type_mask & ~(eFunctionNameTypeMethod | eFunctionNameTypeSelector);
if (!looking_for_nonmethods && parent_decl_ctx.IsValid())
- return true;
+ return IterationAction::Continue;
// Otherwise, we need to also check that the context matches. If it does not
// match, we do nothing.
if (!SymbolFileDWARF::DIEInDeclContext(parent_decl_ctx, die))
- return true;
+ return IterationAction::Continue;
// In case of a full match, we just insert everything we find.
if (name_type_mask & eFunctionNameTypeFull && die.GetMangledName() == name)
@@ -79,7 +80,7 @@ bool DWARFIndex::ProcessFunctionDIE(
return callback(die);
}
- return true;
+ return IterationAction::Continue;
}
DWARFIndex::DIERefCallbackImpl::DIERefCallbackImpl(
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
index 15d8503..3578824 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFIndex.h
@@ -16,6 +16,7 @@
#include "lldb/Core/Module.h"
#include "lldb/Target/Statistics.h"
+#include "lldb/lldb-private-enumerations.h"
namespace lldb_private::plugin {
namespace dwarf {
@@ -82,10 +83,10 @@ public:
virtual void
GetFunctions(const Module::LookupInfo &lookup_info, SymbolFileDWARF &dwarf,
const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) = 0;
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) = 0;
virtual void
GetFunctions(const RegularExpression &regex,
- llvm::function_ref<bool(DWARFDIE die)> callback) = 0;
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) = 0;
virtual void Dump(Stream &s) = 0;
@@ -101,9 +102,10 @@ protected:
/// the function given by "die" matches search criteria given by
/// "parent_decl_ctx" and "name_type_mask", it calls the callback with the
/// given die.
- bool ProcessFunctionDIE(const Module::LookupInfo &lookup_info, DWARFDIE die,
- const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback);
+ IterationAction ProcessFunctionDIE(
+ const Module::LookupInfo &lookup_info, DWARFDIE die,
+ const CompilerDeclContext &parent_decl_ctx,
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback);
class DIERefCallbackImpl {
public:
@@ -140,6 +142,25 @@ protected:
bool ProcessNamespaceDieMatchParents(
const CompilerDeclContext &parent_decl_ctx, DWARFDIE die,
llvm::function_ref<bool(DWARFDIE die)> callback);
+
+ /// Helper to convert callbacks that return an \c IterationAction
+ /// to a callback that returns a \c bool, where \c true indicates
+ /// we should continue iterating. This will be used to incrementally
+ /// migrate the callbacks to return an \c IterationAction.
+ ///
+ /// FIXME: remove once all callbacks in the DWARFIndex APIs return
+ /// IterationAction.
+ struct IterationActionAdaptor {
+ IterationActionAdaptor(
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback)
+ : m_callback_ref(callback) {}
+
+ bool operator()(DWARFDIE die) {
+ return m_callback_ref(std::move(die)) == IterationAction::Continue;
+ }
+
+ llvm::function_ref<IterationAction(DWARFDIE die)> m_callback_ref;
+ };
};
} // namespace dwarf
} // namespace lldb_private::plugin
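
As a usage note, the adaptor is what lets the half-migrated call sites compile: DIERefCallback and the various Find() entry points still expect the old bool(DWARFDIE) convention, so new-style callbacks get wrapped at the boundary. Condensed from ManualDWARFIndex::GetFunctions below:

  // New-style callback: IterationAction return, context filtering inline.
  auto new_cb = [&](DWARFDIE die) {
    if (!SymbolFileDWARF::DIEInDeclContext(parent_decl_ctx, die))
      return IterationAction::Continue; // wrong context: keep scanning
    return callback(die);               // caller decides Continue/Stop
  };
  // Old-style consumer: wrap so `true` means IterationAction::Continue.
  m_set.function_fullnames.Find(
      name, DIERefCallback(IterationActionAdaptor(new_cb), name.GetStringRef()));
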
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
index ff1a76b..3ae9fcc 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
@@ -14,6 +14,7 @@
#include "lldb/Core/Module.h"
#include "lldb/Utility/RegularExpression.h"
#include "lldb/Utility/Stream.h"
+#include "lldb/lldb-private-enumerations.h"
#include "llvm/ADT/Sequence.h"
#include <optional>
@@ -607,7 +608,7 @@ void DebugNamesDWARFIndex::GetNamespacesWithParents(
void DebugNamesDWARFIndex::GetFunctions(
const Module::LookupInfo &lookup_info, SymbolFileDWARF &dwarf,
const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) {
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) {
ConstString name = lookup_info.GetLookupName();
std::set<DWARFDebugInfoEntry *> seen;
for (const DebugNames::Entry &entry :
@@ -617,12 +618,12 @@ void DebugNamesDWARFIndex::GetFunctions(
continue;
if (DWARFDIE die = GetDIE(entry)) {
- if (!ProcessFunctionDIE(lookup_info, die, parent_decl_ctx,
- [&](DWARFDIE die) {
- if (!seen.insert(die.GetDIE()).second)
- return true;
- return callback(die);
- }))
+ if (ProcessFunctionDIE(lookup_info, die, parent_decl_ctx,
+ [&](DWARFDIE die) {
+ if (!seen.insert(die.GetDIE()).second)
+ return IterationAction::Continue;
+ return callback(die);
+ }) == IterationAction::Stop)
return;
}
}
@@ -632,7 +633,7 @@ void DebugNamesDWARFIndex::GetFunctions(
void DebugNamesDWARFIndex::GetFunctions(
const RegularExpression &regex,
- llvm::function_ref<bool(DWARFDIE die)> callback) {
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) {
for (const DebugNames::NameIndex &ni: *m_debug_names_up) {
for (DebugNames::NameTableEntry nte: ni) {
if (!regex.Execute(nte.getString()))
@@ -645,7 +646,7 @@ void DebugNamesDWARFIndex::GetFunctions(
if (tag != DW_TAG_subprogram && tag != DW_TAG_inlined_subroutine)
continue;
- if (!ProcessEntry(*entry_or, callback))
+ if (!ProcessEntry(*entry_or, IterationActionAdaptor(callback)))
return;
}
MaybeLogLookupError(entry_or.takeError(), ni, nte.getString());
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
index ab6cde1..2105919 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
@@ -58,12 +58,13 @@ public:
void GetNamespacesWithParents(
ConstString name, const CompilerDeclContext &parent_decl_ctx,
llvm::function_ref<bool(DWARFDIE die)> callback) override;
- void GetFunctions(const Module::LookupInfo &lookup_info,
- SymbolFileDWARF &dwarf,
- const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) override;
- void GetFunctions(const RegularExpression &regex,
- llvm::function_ref<bool(DWARFDIE die)> callback) override;
+ void GetFunctions(
+ const Module::LookupInfo &lookup_info, SymbolFileDWARF &dwarf,
+ const CompilerDeclContext &parent_decl_ctx,
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) override;
+ void GetFunctions(
+ const RegularExpression &regex,
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) override;
void Dump(Stream &s) override;
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp
index c858ce2..f96ac7e 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.cpp
@@ -21,6 +21,7 @@
#include "lldb/Utility/DataExtractor.h"
#include "lldb/Utility/Stream.h"
#include "lldb/Utility/Timer.h"
+#include "lldb/lldb-private-enumerations.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/ThreadPool.h"
#include <atomic>
@@ -471,60 +472,62 @@ void ManualDWARFIndex::GetNamespaces(
void ManualDWARFIndex::GetFunctions(
const Module::LookupInfo &lookup_info, SymbolFileDWARF &dwarf,
const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) {
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) {
Index();
ConstString name = lookup_info.GetLookupName();
FunctionNameType name_type_mask = lookup_info.GetNameTypeMask();
if (name_type_mask & eFunctionNameTypeFull) {
if (!m_set.function_fullnames.Find(
- name, DIERefCallback(
- [&](DWARFDIE die) {
- if (!SymbolFileDWARF::DIEInDeclContext(parent_decl_ctx,
- die))
- return true;
- return callback(die);
- },
- name.GetStringRef())))
+ name, DIERefCallback(IterationActionAdaptor([&](DWARFDIE die) {
+ if (!SymbolFileDWARF::DIEInDeclContext(
+ parent_decl_ctx, die))
+ return IterationAction::Continue;
+ return callback(die);
+ }),
+ name.GetStringRef())))
return;
}
if (name_type_mask & eFunctionNameTypeBase) {
if (!m_set.function_basenames.Find(
- name, DIERefCallback(
- [&](DWARFDIE die) {
- if (!SymbolFileDWARF::DIEInDeclContext(parent_decl_ctx,
- die))
- return true;
- return callback(die);
- },
- name.GetStringRef())))
+ name, DIERefCallback(IterationActionAdaptor([&](DWARFDIE die) {
+ if (!SymbolFileDWARF::DIEInDeclContext(
+ parent_decl_ctx, die))
+ return IterationAction::Continue;
+ return callback(die);
+ }),
+ name.GetStringRef())))
return;
}
if (name_type_mask & eFunctionNameTypeMethod && !parent_decl_ctx.IsValid()) {
if (!m_set.function_methods.Find(
- name, DIERefCallback(callback, name.GetStringRef())))
+ name, DIERefCallback(IterationActionAdaptor(callback),
+ name.GetStringRef())))
return;
}
if (name_type_mask & eFunctionNameTypeSelector &&
!parent_decl_ctx.IsValid()) {
if (!m_set.function_selectors.Find(
- name, DIERefCallback(callback, name.GetStringRef())))
+ name, DIERefCallback(IterationActionAdaptor(callback),
+ name.GetStringRef())))
return;
}
}
void ManualDWARFIndex::GetFunctions(
const RegularExpression &regex,
- llvm::function_ref<bool(DWARFDIE die)> callback) {
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) {
Index();
- if (!m_set.function_basenames.Find(regex,
- DIERefCallback(callback, regex.GetText())))
+ if (!m_set.function_basenames.Find(
+ regex,
+ DIERefCallback(IterationActionAdaptor(callback), regex.GetText())))
return;
- if (!m_set.function_fullnames.Find(regex,
- DIERefCallback(callback, regex.GetText())))
+ if (!m_set.function_fullnames.Find(
+ regex,
+ DIERefCallback(IterationActionAdaptor(callback), regex.GetText())))
return;
}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h
index 04627b0..5685ba4 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/ManualDWARFIndex.h
@@ -50,12 +50,13 @@ public:
llvm::function_ref<bool(DWARFDIE die)> callback) override;
void GetNamespaces(ConstString name,
llvm::function_ref<bool(DWARFDIE die)> callback) override;
- void GetFunctions(const Module::LookupInfo &lookup_info,
- SymbolFileDWARF &dwarf,
- const CompilerDeclContext &parent_decl_ctx,
- llvm::function_ref<bool(DWARFDIE die)> callback) override;
- void GetFunctions(const RegularExpression &regex,
- llvm::function_ref<bool(DWARFDIE die)> callback) override;
+ void GetFunctions(
+ const Module::LookupInfo &lookup_info, SymbolFileDWARF &dwarf,
+ const CompilerDeclContext &parent_decl_ctx,
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) override;
+ void GetFunctions(
+ const RegularExpression &regex,
+ llvm::function_ref<IterationAction(DWARFDIE die)> callback) override;
void Dump(Stream &s) override;
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
index 41ab8d1..a3ba061 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.cpp
@@ -74,6 +74,7 @@
#include "ManualDWARFIndex.h"
#include "SymbolFileDWARFDebugMap.h"
#include "SymbolFileDWARFDwo.h"
+#include "lldb/lldb-private-enumerations.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h"
@@ -2475,6 +2476,55 @@ bool SymbolFileDWARF::ResolveFunction(const DWARFDIE &orig_die,
return false;
}
+llvm::Expected<SymbolContext>
+SymbolFileDWARF::ResolveFunctionCallLabel(const FunctionCallLabel &label) {
+ std::lock_guard<std::recursive_mutex> guard(GetModuleMutex());
+
+ DWARFDIE die = GetDIE(label.symbol_id);
+ if (!die.IsValid())
+ return llvm::createStringError(
+ llvm::formatv("invalid DIE ID in {0}", label));
+
+ // Label was created using a declaration DIE. Need to fetch the definition
+ // to resolve the function call.
+ if (die.GetAttributeValueAsUnsigned(llvm::dwarf::DW_AT_declaration, 0)) {
+ Module::LookupInfo info(ConstString(label.lookup_name),
+ lldb::eFunctionNameTypeFull,
+ lldb::eLanguageTypeUnknown);
+
+ m_index->GetFunctions(info, *this, {}, [&](DWARFDIE entry) {
+ if (entry.GetAttributeValueAsUnsigned(llvm::dwarf::DW_AT_declaration, 0))
+ return IterationAction::Continue;
+
+ // We don't check whether the specification DIE for this function
+ // corresponds to the declaration DIE because the declaration might be in
+      // a type-unit but the definition in the compile-unit (and its
+      // specification would point to the declaration in the compile-unit). We
+ // rely on the mangled name within the module to be enough to find us the
+ // unique definition.
+ die = entry;
+ return IterationAction::Stop;
+ });
+
+ if (die.GetAttributeValueAsUnsigned(llvm::dwarf::DW_AT_declaration, 0))
+ return llvm::createStringError(
+ llvm::formatv("failed to find definition DIE for {0}", label));
+ }
+
+ SymbolContextList sc_list;
+ if (!ResolveFunction(die, /*include_inlines=*/false, sc_list))
+ return llvm::createStringError(
+ llvm::formatv("failed to resolve function for {0}", label));
+
+ if (sc_list.IsEmpty())
+ return llvm::createStringError(
+ llvm::formatv("failed to find function for {0}", label));
+
+ assert(sc_list.GetSize() == 1);
+
+ return sc_list[0];
+}
+
bool SymbolFileDWARF::DIEInDeclContext(const CompilerDeclContext &decl_ctx,
const DWARFDIE &die,
bool only_root_namespaces) {
@@ -2539,7 +2589,7 @@ void SymbolFileDWARF::FindFunctions(const Module::LookupInfo &lookup_info,
m_index->GetFunctions(lookup_info, *this, parent_decl_ctx, [&](DWARFDIE die) {
if (resolved_dies.insert(die.GetDIE()).second)
ResolveFunction(die, include_inlines, sc_list);
- return true;
+ return IterationAction::Continue;
});
// With -gsimple-template-names, a templated type's DW_AT_name will not
// contain the template parameters. Try again stripping '<' and anything
@@ -2556,7 +2606,7 @@ void SymbolFileDWARF::FindFunctions(const Module::LookupInfo &lookup_info,
[&](DWARFDIE die) {
if (resolved_dies.insert(die.GetDIE()).second)
ResolveFunction(die, include_inlines, sc_list);
- return true;
+ return IterationAction::Continue;
});
}
}
@@ -2592,7 +2642,7 @@ void SymbolFileDWARF::FindFunctions(const RegularExpression &regex,
m_index->GetFunctions(regex, [&](DWARFDIE die) {
if (resolved_dies.insert(die.GetDIE()).second)
ResolveFunction(die, include_inlines, sc_list);
- return true;
+ return IterationAction::Continue;
});
}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
index 56d8ccb..3ec538d 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARF.h
@@ -436,6 +436,9 @@ protected:
DIEArray MergeBlockAbstractParameters(const DWARFDIE &block_die,
DIEArray &&variable_dies);
+ llvm::Expected<SymbolContext>
+ ResolveFunctionCallLabel(const FunctionCallLabel &label) override;
+
// Given a die_offset, figure out the symbol context representing that die.
bool ResolveFunction(const DWARFDIE &die, bool include_inlines,
SymbolContextList &sc_list);
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
index dd94f0b..9d7452a 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.cpp
@@ -1602,3 +1602,14 @@ void SymbolFileDWARFDebugMap::GetCompileOptions(
return IterationAction::Continue;
});
}
+
+llvm::Expected<SymbolContext> SymbolFileDWARFDebugMap::ResolveFunctionCallLabel(
+ const FunctionCallLabel &label) {
+ const uint64_t oso_idx = GetOSOIndexFromUserID(label.symbol_id);
+ SymbolFileDWARF *oso_dwarf = GetSymbolFileByOSOIndex(oso_idx);
+ if (!oso_dwarf)
+ return llvm::createStringError(llvm::formatv(
+ "couldn't find symbol file for {0} in debug-map.", label));
+
+ return oso_dwarf->ResolveFunctionCallLabel(label);
+}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h
index f074b17..e1f1df23 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/SymbolFileDWARFDebugMap.h
@@ -144,6 +144,9 @@ public:
void
GetCompileOptions(std::unordered_map<lldb::CompUnitSP, Args> &args) override;
+ llvm::Expected<SymbolContext>
+ ResolveFunctionCallLabel(const FunctionCallLabel &label) override;
+
protected:
enum { kHaveInitializedOSOs = (1 << 0), kNumFlags };
diff --git a/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp b/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
index 702ec5e..8137622 100644
--- a/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
+++ b/lldb/source/Plugins/SymbolFile/NativePDB/PdbAstBuilder.cpp
@@ -88,7 +88,7 @@ struct CreateMethodDecl : public TypeVisitorCallbacks {
MethodOptions::CompilerGenerated;
function_decl = m_clang.AddMethodToCXXRecordType(
parent_ty, proc_name,
- /*mangled_name=*/nullptr, func_ct, /*access=*/access_type,
+ /*asm_label=*/{}, func_ct, /*access=*/access_type,
/*is_virtual=*/is_virtual, /*is_static=*/is_static,
/*is_inline=*/false, /*is_explicit=*/false,
/*is_attr_used=*/false, /*is_artificial=*/is_artificial);
@@ -903,7 +903,7 @@ PdbAstBuilder::CreateFunctionDecl(PdbCompilandSymId func_id,
if (!function_decl) {
function_decl = m_clang.AddMethodToCXXRecordType(
parent_opaque_ty, func_name,
- /*mangled_name=*/nullptr, func_ct,
+ /*asm_label=*/{}, func_ct,
/*access=*/lldb::AccessType::eAccessPublic,
/*is_virtual=*/false, /*is_static=*/false,
/*is_inline=*/false, /*is_explicit=*/false,
@@ -913,7 +913,7 @@ PdbAstBuilder::CreateFunctionDecl(PdbCompilandSymId func_id,
} else {
function_decl = m_clang.CreateFunctionDeclaration(
parent, OptionalClangModuleID(), func_name, func_ct, func_storage,
- is_inline);
+ is_inline, /*asm_label=*/{});
CreateFunctionParameters(func_id, *function_decl, param_count);
}
return function_decl;
diff --git a/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp b/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp
index 807ee5b..1c575e9 100644
--- a/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp
+++ b/lldb/source/Plugins/SymbolFile/NativePDB/UdtRecordCompleter.cpp
@@ -111,9 +111,8 @@ void UdtRecordCompleter::AddMethod(llvm::StringRef name, TypeIndex type_idx,
bool is_artificial = (options & MethodOptions::CompilerGenerated) ==
MethodOptions::CompilerGenerated;
m_ast_builder.clang().AddMethodToCXXRecordType(
- derived_opaque_ty, name.data(), nullptr, method_ct,
- access_type, attrs.isVirtual(), attrs.isStatic(), false, false, false,
- is_artificial);
+ derived_opaque_ty, name.data(), /*asm_label=*/{}, method_ct, access_type,
+ attrs.isVirtual(), attrs.isStatic(), false, false, false, is_artificial);
m_cxx_record_map[derived_opaque_ty].insert({name, method_ct});
}
diff --git a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
index 0090d8f..8b8eac6e 100644
--- a/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
+++ b/lldb/source/Plugins/SymbolFile/PDB/PDBASTParser.cpp
@@ -954,7 +954,8 @@ PDBASTParser::GetDeclForSymbol(const llvm::pdb::PDBSymbol &symbol) {
auto decl = m_ast.CreateFunctionDeclaration(
decl_context, OptionalClangModuleID(), name,
- type->GetForwardCompilerType(), storage, func->hasInlineAttribute());
+ type->GetForwardCompilerType(), storage, func->hasInlineAttribute(),
+ /*asm_label=*/{});
std::vector<clang::ParmVarDecl *> params;
if (std::unique_ptr<PDBSymbolTypeFunctionSig> sig = func->getSignature()) {
@@ -1446,7 +1447,7 @@ PDBASTParser::AddRecordMethod(lldb_private::SymbolFile &symbol_file,
// TODO: get mangled name for the method.
return m_ast.AddMethodToCXXRecordType(
record_type.GetOpaqueQualType(), name.c_str(),
- /*mangled_name*/ nullptr, method_comp_type, access, method.isVirtual(),
+ /*asm_label=*/{}, method_comp_type, access, method.isVirtual(),
method.isStatic(), method.hasInlineAttribute(),
/*is_explicit*/ false, // FIXME: Need this field in CodeView.
/*is_attr_used*/ false,
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index 256952dc..9301f92 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -60,6 +60,7 @@
#include "lldb/Core/Module.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Core/UniqueCStringMap.h"
+#include "lldb/Expression/Expression.h"
#include "lldb/Host/StreamFile.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/SymbolFile.h"
@@ -665,10 +666,9 @@ void TypeSystemClang::CreateASTContext() {
m_file_manager_up = std::make_unique<clang::FileManager>(
file_system_options, FileSystem::Instance().GetVirtualFileSystem());
- llvm::IntrusiveRefCntPtr<DiagnosticIDs> diag_id_sp(new DiagnosticIDs());
m_diagnostic_options_up = std::make_unique<DiagnosticOptions>();
- m_diagnostics_engine_up =
- std::make_unique<DiagnosticsEngine>(diag_id_sp, *m_diagnostic_options_up);
+ m_diagnostics_engine_up = std::make_unique<DiagnosticsEngine>(
+ DiagnosticIDs::create(), *m_diagnostic_options_up);
m_source_manager_up = std::make_unique<clang::SourceManager>(
*m_diagnostics_engine_up, *m_file_manager_up);
@@ -796,6 +796,8 @@ TypeSystemClang::GetBuiltinTypeForEncodingAndBitSize(Encoding encoding,
return GetType(ast.LongDoubleTy);
if (QualTypeMatchesBitSize(bit_size, ast, ast.HalfTy))
return GetType(ast.HalfTy);
+ if (QualTypeMatchesBitSize(bit_size, ast, ast.Float128Ty))
+ return GetType(ast.Float128Ty);
break;
case eEncodingVector:
@@ -957,6 +959,13 @@ CompilerType TypeSystemClang::GetBuiltinTypeForDWARFEncodingAndBitSize(
if (type_name == "long double" &&
QualTypeMatchesBitSize(bit_size, ast, ast.LongDoubleTy))
return GetType(ast.LongDoubleTy);
+ // As Rust currently uses `TypeSystemClang`, match `f128` here as well so it
+ // doesn't get misinterpreted as `long double` on targets where they are
+ // the same size but different formats.
+ if ((type_name == "__float128" || type_name == "_Float128" ||
+ type_name == "f128") &&
+ QualTypeMatchesBitSize(bit_size, ast, ast.Float128Ty))
+ return GetType(ast.Float128Ty);
// Fall back to not requiring a name match
if (QualTypeMatchesBitSize(bit_size, ast, ast.FloatTy))
return GetType(ast.FloatTy);
@@ -966,6 +975,8 @@ CompilerType TypeSystemClang::GetBuiltinTypeForDWARFEncodingAndBitSize(
return GetType(ast.LongDoubleTy);
if (QualTypeMatchesBitSize(bit_size, ast, ast.HalfTy))
return GetType(ast.HalfTy);
+ if (QualTypeMatchesBitSize(bit_size, ast, ast.Float128Ty))
+ return GetType(ast.Float128Ty);
break;
case DW_ATE_signed:
@@ -2055,6 +2066,8 @@ TypeSystemClang::GetOpaqueCompilerType(clang::ASTContext *ast,
return ast->DoubleTy.getAsOpaquePtr();
case eBasicTypeLongDouble:
return ast->LongDoubleTy.getAsOpaquePtr();
+ case eBasicTypeFloat128:
+ return ast->Float128Ty.getAsOpaquePtr();
case eBasicTypeFloatComplex:
return ast->getComplexType(ast->FloatTy).getAsOpaquePtr();
case eBasicTypeDoubleComplex:
@@ -2137,7 +2150,7 @@ std::string TypeSystemClang::GetTypeNameForDecl(const NamedDecl *named_decl,
FunctionDecl *TypeSystemClang::CreateFunctionDeclaration(
clang::DeclContext *decl_ctx, OptionalClangModuleID owning_module,
llvm::StringRef name, const CompilerType &function_clang_type,
- clang::StorageClass storage, bool is_inline) {
+ clang::StorageClass storage, bool is_inline, llvm::StringRef asm_label) {
FunctionDecl *func_decl = nullptr;
ASTContext &ast = getASTContext();
if (!decl_ctx)
@@ -2158,6 +2171,21 @@ FunctionDecl *TypeSystemClang::CreateFunctionDeclaration(
func_decl->setConstexprKind(isConstexprSpecified
? ConstexprSpecKind::Constexpr
: ConstexprSpecKind::Unspecified);
+
+ // Attach an asm(<mangled_name>) label to the FunctionDecl.
+ // This ensures that clang::CodeGen emits function calls
+ // using symbols that are mangled according to the DW_AT_linkage_name.
+ // If we didn't do this, the external symbols wouldn't exactly
+ // match the mangled name LLDB knows about and the IRExecutionUnit
+ // would have to fall back to searching object files for
+ // approximately matching function names. The motivating
+ // example is generating calls to ABI-tagged template functions.
+ // This is done separately for member functions in
+ // AddMethodToCXXRecordType.
+ if (!asm_label.empty())
+ func_decl->addAttr(clang::AsmLabelAttr::CreateImplicit(ast, asm_label,
+ /*literal=*/true));
+
SetOwningModule(func_decl, owning_module);
decl_ctx->addDecl(func_decl);
@@ -4728,19 +4756,24 @@ CompilerType TypeSystemClang::CreateGenericFunctionPrototype() {
// Exploring the type
const llvm::fltSemantics &
-TypeSystemClang::GetFloatTypeSemantics(size_t byte_size) {
+TypeSystemClang::GetFloatTypeSemantics(size_t byte_size, lldb::Format format) {
clang::ASTContext &ast = getASTContext();
const size_t bit_size = byte_size * 8;
if (bit_size == ast.getTypeSize(ast.FloatTy))
return ast.getFloatTypeSemantics(ast.FloatTy);
else if (bit_size == ast.getTypeSize(ast.DoubleTy))
return ast.getFloatTypeSemantics(ast.DoubleTy);
+ else if (format == eFormatFloat128 &&
+ bit_size == ast.getTypeSize(ast.Float128Ty))
+ return ast.getFloatTypeSemantics(ast.Float128Ty);
else if (bit_size == ast.getTypeSize(ast.LongDoubleTy) ||
bit_size == llvm::APFloat::semanticsSizeInBits(
ast.getFloatTypeSemantics(ast.LongDoubleTy)))
return ast.getFloatTypeSemantics(ast.LongDoubleTy);
else if (bit_size == ast.getTypeSize(ast.HalfTy))
return ast.getFloatTypeSemantics(ast.HalfTy);
+ else if (bit_size == ast.getTypeSize(ast.Float128Ty))
+ return ast.getFloatTypeSemantics(ast.Float128Ty);
return llvm::APFloatBase::Bogus();
}
@@ -5218,6 +5251,8 @@ lldb::Format TypeSystemClang::GetFormat(lldb::opaque_compiler_type_t type) {
case clang::BuiltinType::Double:
case clang::BuiltinType::LongDouble:
return lldb::eFormatFloat;
+ case clang::BuiltinType::Float128:
+ return lldb::eFormatFloat128;
default:
return lldb::eFormatHex;
}
@@ -5515,6 +5550,8 @@ TypeSystemClang::GetBasicTypeEnumeration(lldb::opaque_compiler_type_t type) {
return eBasicTypeDouble;
case clang::BuiltinType::LongDouble:
return eBasicTypeLongDouble;
+ case clang::BuiltinType::Float128:
+ return eBasicTypeFloat128;
case clang::BuiltinType::NullPtr:
return eBasicTypeNullPtr;
@@ -6076,6 +6113,7 @@ uint32_t TypeSystemClang::GetNumPointeeChildren(clang::QualType type) {
case clang::BuiltinType::Float:
case clang::BuiltinType::Double:
case clang::BuiltinType::LongDouble:
+ case clang::BuiltinType::Float128:
case clang::BuiltinType::Dependent:
case clang::BuiltinType::Overload:
case clang::BuiltinType::ObjCId:
@@ -7651,7 +7689,7 @@ TypeSystemClang::CreateParameterDeclarations(
clang::CXXMethodDecl *TypeSystemClang::AddMethodToCXXRecordType(
lldb::opaque_compiler_type_t type, llvm::StringRef name,
- const char *mangled_name, const CompilerType &method_clang_type,
+ llvm::StringRef asm_label, const CompilerType &method_clang_type,
lldb::AccessType access, bool is_virtual, bool is_static, bool is_inline,
bool is_explicit, bool is_attr_used, bool is_artificial) {
if (!type || !method_clang_type.IsValid() || name.empty())
@@ -7784,10 +7822,9 @@ clang::CXXMethodDecl *TypeSystemClang::AddMethodToCXXRecordType(
if (is_attr_used)
cxx_method_decl->addAttr(clang::UsedAttr::CreateImplicit(getASTContext()));
- if (mangled_name != nullptr) {
+ if (!asm_label.empty())
cxx_method_decl->addAttr(clang::AsmLabelAttr::CreateImplicit(
- getASTContext(), mangled_name, /*literal=*/false));
- }
+ getASTContext(), asm_label, /*literal=*/true));
// Parameters on member function declarations in DWARF generally don't
// have names, so we omit them when creating the ParmVarDecls.
@@ -8720,6 +8757,7 @@ bool TypeSystemClang::DumpTypeValue(
case eFormatHex:
case eFormatHexUppercase:
case eFormatFloat:
+ case eFormatFloat128:
case eFormatOctal:
case eFormatOSType:
case eFormatUnsigned:
@@ -9030,6 +9068,21 @@ ConstString TypeSystemClang::DeclGetName(void *opaque_decl) {
return ConstString();
}
+static ConstString
+ExtractMangledNameFromFunctionCallLabel(llvm::StringRef label) {
+ auto label_or_err = FunctionCallLabel::fromString(label);
+ if (!label_or_err) {
+ llvm::consumeError(label_or_err.takeError());
+ return {};
+ }
+
+ llvm::StringRef mangled = label_or_err->lookup_name;
+ if (Mangled::IsMangledName(mangled))
+ return ConstString(mangled);
+
+ return {};
+}
+
ConstString TypeSystemClang::DeclGetMangledName(void *opaque_decl) {
clang::NamedDecl *nd = llvm::dyn_cast_or_null<clang::NamedDecl>(
static_cast<clang::Decl *>(opaque_decl));
@@ -9041,6 +9094,13 @@ ConstString TypeSystemClang::DeclGetMangledName(void *opaque_decl) {
if (!mc || !mc->shouldMangleCXXName(nd))
return {};
+ // We have an LLDB FunctionCallLabel instead of an ordinary mangled name.
+ // Extract the mangled name out of this label.
+ if (const auto *label = nd->getAttr<AsmLabelAttr>())
+ if (ConstString mangled =
+ ExtractMangledNameFromFunctionCallLabel(label->getLabel()))
+ return mangled;
+
llvm::SmallVector<char, 1024> buf;
llvm::raw_svector_ostream llvm_ostrm(buf);
if (llvm::isa<clang::CXXConstructorDecl>(nd)) {
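
The AsmLabelAttr::CreateImplicit(..., /*literal=*/true) calls above have the same effect as a GNU asm label written in source: Clang's CodeGen emits references to exactly that symbol instead of re-running name mangling. The source-level equivalent, with an illustrative label string (the patch attaches a FunctionCallLabel string rather than a plain mangled name):

  // GNU extension: pin the declaration to a verbatim symbol name.
  int tagged_fn(int) asm("my_exact_symbol"); // label is illustrative

  int call_it() {
    return tagged_fn(42); // CodeGen emits a call to my_exact_symbol
  }
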
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
index 63dee9d..5431d12 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
@@ -477,7 +477,7 @@ public:
clang::FunctionDecl *CreateFunctionDeclaration(
clang::DeclContext *decl_ctx, OptionalClangModuleID owning_module,
llvm::StringRef name, const CompilerType &function_Type,
- clang::StorageClass storage, bool is_inline);
+ clang::StorageClass storage, bool is_inline, llvm::StringRef asm_label);
CompilerType
CreateFunctionType(const CompilerType &result_type,
@@ -823,7 +823,8 @@ public:
// Exploring the type
- const llvm::fltSemantics &GetFloatTypeSemantics(size_t byte_size) override;
+ const llvm::fltSemantics &GetFloatTypeSemantics(size_t byte_size,
+ lldb::Format format) override;
llvm::Expected<uint64_t> GetByteSize(lldb::opaque_compiler_type_t type,
ExecutionContextScope *exe_scope) {
@@ -1001,7 +1002,7 @@ public:
clang::CXXMethodDecl *AddMethodToCXXRecordType(
lldb::opaque_compiler_type_t type, llvm::StringRef name,
- const char *mangled_name, const CompilerType &method_type,
+ llvm::StringRef asm_label, const CompilerType &method_type,
lldb::AccessType access, bool is_virtual, bool is_static, bool is_inline,
bool is_explicit, bool is_attr_used, bool is_artificial);
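
Why GetFloatTypeSemantics now needs a format hint: on some targets long double and a true binary128 type occupy the same number of storage bits (x87 80-bit extended is padded to 16 bytes), so byte size alone cannot select the right llvm::fltSemantics; the eFormatFloat128 check in the .cpp above runs before the long-double check for exactly that case. A small sketch of the ambiguity, using LLVM's APFloat semantics directly:

  #include "llvm/ADT/APFloat.h"

  // Both of these can occupy 16 bytes of storage on x86-64, but they are
  // different encodings; a format hint is needed to tell them apart.
  const llvm::fltSemantics &PickSemantics(bool is_float128) {
    return is_float128 ? llvm::APFloat::IEEEquad()           // _Float128/f128
                       : llvm::APFloat::x87DoubleExtended(); // long double
  }
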
diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp
index 2aa02fd..ff9e5fc 100644
--- a/lldb/source/Target/Process.cpp
+++ b/lldb/source/Target/Process.cpp
@@ -166,6 +166,9 @@ ProcessProperties::ProcessProperties(lldb_private::Process *process)
m_collection_sp->SetValueChangedCallback(
ePropertyPythonOSPluginPath,
[this] { m_process->LoadOperatingSystemPlugin(true); });
+ m_collection_sp->SetValueChangedCallback(
+ ePropertyDisableLangRuntimeUnwindPlans,
+ [this] { DisableLanguageRuntimeUnwindPlansCallback(); });
}
m_experimental_properties_up =
@@ -280,6 +283,15 @@ void ProcessProperties::SetDisableLangRuntimeUnwindPlans(bool disable) {
m_process->Flush();
}
+void ProcessProperties::DisableLanguageRuntimeUnwindPlansCallback() {
+ if (!m_process)
+ return;
+ for (auto thread_sp : m_process->Threads()) {
+ thread_sp->ClearStackFrames();
+ thread_sp->DiscardThreadPlans(/*force*/ true);
+ }
+}
+
bool ProcessProperties::GetDetachKeepsStopped() const {
const uint32_t idx = ePropertyDetachKeepsStopped;
return GetPropertyAtIndexAs<bool>(
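
Putting the two Process.cpp hunks together: the property registration and the callback body form one pattern — a settings write triggers immediate invalidation of per-thread cached state, so the next stop re-unwinds under the new policy instead of serving stale frames. Joined for readability (same code as above, with the member function inlined):

  m_collection_sp->SetValueChangedCallback(
      ePropertyDisableLangRuntimeUnwindPlans, [this] {
        if (!m_process)
          return;
        for (auto thread_sp : m_process->Threads()) {
          thread_sp->ClearStackFrames();                 // drop cached frames
          thread_sp->DiscardThreadPlans(/*force=*/true); // drop stale plans
        }
      });
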
diff --git a/lldb/source/ValueObject/ValueObject.cpp b/lldb/source/ValueObject/ValueObject.cpp
index 84ad130..3878442 100644
--- a/lldb/source/ValueObject/ValueObject.cpp
+++ b/lldb/source/ValueObject/ValueObject.cpp
@@ -1466,8 +1466,9 @@ bool ValueObject::DumpPrintableRepresentation(
(custom_format == eFormatComplexFloat) ||
(custom_format == eFormatDecimal) || (custom_format == eFormatHex) ||
(custom_format == eFormatHexUppercase) ||
- (custom_format == eFormatFloat) || (custom_format == eFormatOctal) ||
- (custom_format == eFormatOSType) ||
+ (custom_format == eFormatFloat) ||
+ (custom_format == eFormatFloat128) ||
+ (custom_format == eFormatOctal) || (custom_format == eFormatOSType) ||
(custom_format == eFormatUnicode16) ||
(custom_format == eFormatUnicode32) ||
(custom_format == eFormatUnsigned) ||
diff --git a/lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py b/lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py
index 8179d52..67dfbea 100644
--- a/lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py
+++ b/lldb/test/API/commands/watchpoints/step_over_watchpoint/TestStepOverWatchpoint.py
@@ -50,11 +50,11 @@ class TestStepOverWatchpoint(TestBase):
lldb.eStopReasonWatchpoint,
STOPPED_DUE_TO_WATCHPOINT,
)
- self.assertEqual(thread.GetStopDescription(20), "watchpoint 1")
+ self.assertEqual(thread.stop_description, "watchpoint 1")
process.Continue()
self.assertState(process.GetState(), lldb.eStateStopped, PROCESS_STOPPED)
- self.assertEqual(thread.GetStopDescription(20), "step over")
+ self.assertEqual(thread.stop_description, "step over")
self.step_inst_for_watchpoint(1)
@@ -89,11 +89,11 @@ class TestStepOverWatchpoint(TestBase):
lldb.eStopReasonWatchpoint,
STOPPED_DUE_TO_WATCHPOINT,
)
- self.assertEqual(thread.GetStopDescription(20), "watchpoint 1")
+ self.assertEqual(thread.stop_description, "watchpoint 1")
process.Continue()
self.assertState(process.GetState(), lldb.eStateStopped, PROCESS_STOPPED)
- self.assertEqual(thread.GetStopDescription(20), "step over")
+ self.assertEqual(thread.stop_description, "step over")
self.step_inst_for_watchpoint(1)
@@ -106,7 +106,7 @@ class TestStepOverWatchpoint(TestBase):
if stop_reason == lldb.eStopReasonWatchpoint:
self.assertFalse(watchpoint_hit, "Watchpoint already hit.")
expected_stop_desc = "watchpoint %d" % wp_id
- actual_stop_desc = self.thread().GetStopDescription(20)
+ actual_stop_desc = self.thread().stop_description
self.assertEqual(
actual_stop_desc, expected_stop_desc, "Watchpoint ID didn't match."
)
diff --git a/lldb/test/API/commands/watchpoints/watchpoint_count/TestWatchpointCount.py b/lldb/test/API/commands/watchpoints/watchpoint_count/TestWatchpointCount.py
index ff834b5..a0251d4 100644
--- a/lldb/test/API/commands/watchpoints/watchpoint_count/TestWatchpointCount.py
+++ b/lldb/test/API/commands/watchpoints/watchpoint_count/TestWatchpointCount.py
@@ -35,7 +35,7 @@ class TestWatchpointCount(TestBase):
self.assertStopReason(
stop_reason, lldb.eStopReasonWatchpoint, "watchpoint for x1 not hit"
)
- stop_reason_descr = thread.GetStopDescription(256)
+ stop_reason_descr = thread.stop_description
self.assertEqual(stop_reason_descr, "watchpoint 1")
process.Continue()
@@ -43,5 +43,5 @@ class TestWatchpointCount(TestBase):
self.assertStopReason(
stop_reason, lldb.eStopReasonWatchpoint, "watchpoint for x2 not hit"
)
- stop_reason_descr = thread.GetStopDescription(256)
+ stop_reason_descr = thread.stop_description
self.assertEqual(stop_reason_descr, "watchpoint 2")
diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py b/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py
index 12b464d..67c5d7d 100644
--- a/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py
+++ b/lldb/test/API/functionalities/gdb_remote_client/TestGDBRemoteClient.py
@@ -594,7 +594,7 @@ class TestGDBRemoteClient(GDBRemoteTestBase):
process = self.connect(target)
self.assertEqual(process.threads[0].GetStopReason(), lldb.eStopReasonSignal)
- self.assertEqual(process.threads[0].GetStopDescription(100), "signal SIGBUS")
+ self.assertEqual(process.threads[0].stop_description, "signal SIGBUS")
def test_signal_lldb_old(self):
class MyResponder(MockGDBServerResponder):
@@ -620,7 +620,7 @@ class TestGDBRemoteClient(GDBRemoteTestBase):
process = self.connect(target)
self.assertEqual(process.threads[0].GetStopReason(), lldb.eStopReasonSignal)
- self.assertEqual(process.threads[0].GetStopDescription(100), "signal SIGUSR1")
+ self.assertEqual(process.threads[0].stop_description, "signal SIGUSR1")
def test_signal_lldb(self):
class MyResponder(MockGDBServerResponder):
@@ -643,7 +643,7 @@ class TestGDBRemoteClient(GDBRemoteTestBase):
process = self.connect(target)
self.assertEqual(process.threads[0].GetStopReason(), lldb.eStopReasonSignal)
- self.assertEqual(process.threads[0].GetStopDescription(100), "signal SIGUSR1")
+ self.assertEqual(process.threads[0].stop_description, "signal SIGUSR1")
def do_siginfo_test(self, platform, target_yaml, raw_data, expected):
class MyResponder(MockGDBServerResponder):
diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestWasm.py b/lldb/test/API/functionalities/gdb_remote_client/TestWasm.py
index 445f422..73c81ef 100644
--- a/lldb/test/API/functionalities/gdb_remote_client/TestWasm.py
+++ b/lldb/test/API/functionalities/gdb_remote_client/TestWasm.py
@@ -1,12 +1,15 @@
import lldb
+import os
import binascii
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
from lldbsuite.test.gdbclientutils import *
from lldbsuite.test.lldbgdbclient import GDBRemoteTestBase
-LLDB_INVALID_ADDRESS = lldb.LLDB_INVALID_ADDRESS
-load_address = 0x400000000
+MODULE_ID = 4
+LOAD_ADDRESS = MODULE_ID << 32
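+# LLDB encodes Wasm addresses with the module ID in the upper 32 bits and the
+# code offset in the lower 32 bits, so LOAD_ADDRESS | offset names an address
+# inside module 4.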
+WASM_LOCAL_ADDR = 0x103E0
+
def format_register_value(val):
"""
@@ -23,12 +26,59 @@ def format_register_value(val):
return result
+class WasmStackFrame:
+ def __init__(self, address):
+ self._address = address
+
+ def __str__(self):
+ return format_register_value(LOAD_ADDRESS | self._address)
+
+
+class WasmCallStack:
+ def __init__(self, wasm_stack_frames):
+ self._wasm_stack_frames = wasm_stack_frames
+
+ def __str__(self):
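+        # qWasmCallStack replies carry the frame PCs concatenated as
+        # little-endian 64-bit hex strings.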
+ result = ""
+ for frame in self._wasm_stack_frames:
+ result += str(frame)
+ return result
+
+
+class FakeMemory:
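+    """A flat byte buffer covering [start_addr, end_addr), used to serve the
+    mock responder's memory reads for the fake Wasm locals."""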
+ def __init__(self, start_addr, end_addr):
+ self._base_addr = start_addr
+ self._memory = bytearray(end_addr - start_addr)
+ self._memoryview = memoryview(self._memory)
+
+ def store_bytes(self, addr, bytes_obj):
+        assert addr >= self._base_addr
+        assert addr < self._base_addr + len(self._memoryview)
+ offset = addr - self._base_addr
+ chunk = self._memoryview[offset : offset + len(bytes_obj)]
+ for i in range(len(bytes_obj)):
+ chunk[i] = bytes_obj[i]
+
+ def get_bytes(self, addr, length):
+        assert addr >= self._base_addr
+        assert addr < self._base_addr + len(self._memoryview)
+
+ offset = addr - self._base_addr
+ return self._memoryview[offset : offset + length]
+
+    def contains(self, addr):
+        return 0 <= addr - self._base_addr < len(self._memoryview)
+
+
class MyResponder(MockGDBServerResponder):
- current_pc = load_address + 0x0A
+ current_pc = LOAD_ADDRESS | 0x01AD
- def __init__(self, obj_path, module_name=""):
+    def __init__(self, obj_path, module_name="", wasm_call_stacks=None, memory=None):
self._obj_path = obj_path
self._module_name = module_name or obj_path
+        self._wasm_call_stacks = wasm_call_stacks or []
+ self._call_stack_request_count = 0
+ self._memory = memory
MockGDBServerResponder.__init__(self)
def respond(self, packet):
@@ -36,6 +86,8 @@ class MyResponder(MockGDBServerResponder):
return self.qRegisterInfo(packet[13:])
if packet.startswith("qWasmCallStack"):
return self.qWasmCallStack()
+ if packet.startswith("qWasmLocal"):
+ return self.qWasmLocal(packet)
return MockGDBServerResponder.respond(self, packet)
def qSupported(self, client_supported):
@@ -71,28 +123,61 @@ class MyResponder(MockGDBServerResponder):
if obj == "libraries":
xml = (
'<library-list><library name="%s"><section address="%d"/></library></library-list>'
- % (self._module_name, load_address)
+ % (self._module_name, LOAD_ADDRESS)
)
return xml, False
else:
return None, False
def readMemory(self, addr, length):
- if addr < load_address:
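+        # Reads that fall inside the fake data memory are served from there;
+        # everything else comes out of the module image at LOAD_ADDRESS.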
+ if self._memory and self._memory.contains(addr):
+ chunk = self._memory.get_bytes(addr, length)
+ return chunk.hex()
+ if addr < LOAD_ADDRESS:
return "E02"
result = ""
with open(self._obj_path, mode="rb") as file:
file_content = bytearray(file.read())
- addr_from = addr - load_address
+ if addr >= LOAD_ADDRESS + len(file_content):
+ return "E03"
+ addr_from = addr - LOAD_ADDRESS
addr_to = addr_from + min(length, len(file_content) - addr_from)
for i in range(addr_from, addr_to):
result += format(file_content[i], "02x")
file.close()
return result
+ def setBreakpoint(self, packet):
+ bp_data = packet[1:].split(",")
+ self._bp_address = bp_data[1]
+ return "OK"
+
+ def qfThreadInfo(self):
+ return "m1"
+
+ def cont(self):
+ # Continue execution. Simulates running the Wasm engine until a breakpoint is hit.
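+        # Reply with a T05 (SIGTRAP) stop packet that reports the masked
+        # breakpoint address as the thread's pc.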
+ return (
+ "T05thread-pcs:"
+ + format(int(self._bp_address, 16) & 0x3FFFFFFFFFFFFFFF, "x")
+ + ";thread:1"
+ )
+
def qWasmCallStack(self):
- # Return two 64-bit addresses: 0x40000000000001B3, 0x40000000000001FE
- return "b301000000000040fe01000000000040"
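+        # Serve the pre-baked call stacks in order, one per request.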
+ if len(self._wasm_call_stacks) == 0:
+ return ""
+ result = str(self._wasm_call_stacks[self._call_stack_request_count])
+ self._call_stack_request_count += 1
+ return result
+
+ def qWasmLocal(self, packet):
+ # Format: qWasmLocal:frame_index;index
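+        # e.g. "qWasmLocal:0;2" requests local 2 of frame 0.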
+ data = packet.split(":")
+ data = data[1].split(";")
+ frame_index, local_index = data
+ if frame_index == "0" and local_index == "2":
+ return format_register_value(WASM_LOCAL_ADDR)
+ return "E03"
class TestWasm(GDBRemoteTestBase):
@@ -124,35 +209,35 @@ class TestWasm(GDBRemoteTestBase):
code_section = module.GetSectionAtIndex(0)
self.assertEqual("code", code_section.GetName())
self.assertEqual(
- load_address | code_section.GetFileOffset(),
+ LOAD_ADDRESS | code_section.GetFileOffset(),
code_section.GetLoadAddress(target),
)
debug_info_section = module.GetSectionAtIndex(1)
self.assertEqual(".debug_info", debug_info_section.GetName())
self.assertEqual(
- load_address | debug_info_section.GetFileOffset(),
+ LOAD_ADDRESS | debug_info_section.GetFileOffset(),
debug_info_section.GetLoadAddress(target),
)
debug_abbrev_section = module.GetSectionAtIndex(2)
self.assertEqual(".debug_abbrev", debug_abbrev_section.GetName())
self.assertEqual(
- load_address | debug_abbrev_section.GetFileOffset(),
+ LOAD_ADDRESS | debug_abbrev_section.GetFileOffset(),
debug_abbrev_section.GetLoadAddress(target),
)
debug_line_section = module.GetSectionAtIndex(3)
self.assertEqual(".debug_line", debug_line_section.GetName())
self.assertEqual(
- load_address | debug_line_section.GetFileOffset(),
+ LOAD_ADDRESS | debug_line_section.GetFileOffset(),
debug_line_section.GetLoadAddress(target),
)
debug_str_section = module.GetSectionAtIndex(4)
self.assertEqual(".debug_str", debug_str_section.GetName())
self.assertEqual(
- load_address | debug_line_section.GetFileOffset(),
+ LOAD_ADDRESS | debug_line_section.GetFileOffset(),
debug_line_section.GetLoadAddress(target),
)
@@ -194,97 +279,103 @@ class TestWasm(GDBRemoteTestBase):
code_section = module.GetSectionAtIndex(0)
self.assertEqual("code", code_section.GetName())
self.assertEqual(
- load_address | code_section.GetFileOffset(),
+ LOAD_ADDRESS | code_section.GetFileOffset(),
code_section.GetLoadAddress(target),
)
debug_info_section = module.GetSectionAtIndex(1)
self.assertEqual(".debug_info", debug_info_section.GetName())
self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_info_section.GetLoadAddress(target)
+ lldb.LLDB_INVALID_ADDRESS, debug_info_section.GetLoadAddress(target)
)
debug_abbrev_section = module.GetSectionAtIndex(2)
self.assertEqual(".debug_abbrev", debug_abbrev_section.GetName())
self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_abbrev_section.GetLoadAddress(target)
+ lldb.LLDB_INVALID_ADDRESS, debug_abbrev_section.GetLoadAddress(target)
)
debug_line_section = module.GetSectionAtIndex(3)
self.assertEqual(".debug_line", debug_line_section.GetName())
self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_line_section.GetLoadAddress(target)
+ lldb.LLDB_INVALID_ADDRESS, debug_line_section.GetLoadAddress(target)
)
debug_str_section = module.GetSectionAtIndex(4)
self.assertEqual(".debug_str", debug_str_section.GetName())
self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_line_section.GetLoadAddress(target)
+ lldb.LLDB_INVALID_ADDRESS, debug_line_section.GetLoadAddress(target)
)
@skipIfAsan
@skipIfXmlSupportMissing
- def test_load_module_from_file(self):
- """Test connecting to a WebAssembly engine via GDB-remote and loading a Wasm module from a file"""
-
- yaml_path = "test_wasm_embedded_debug_sections.yaml"
- yaml_base, ext = os.path.splitext(yaml_path)
+ def test_simple_wasm_debugging_session(self):
+ """Test connecting to a WebAssembly engine via GDB-remote, loading a
+ Wasm module with embedded DWARF symbols, setting a breakpoint and
+ checking the debuggee state"""
+
+ # simple.yaml was created by compiling simple.c to wasm and using
+ # obj2yaml on the output.
+ #
+ # $ clang -target wasm32 -nostdlib -Wl,--no-entry -Wl,--export-all -O0 -g -o simple.wasm simple.c
+ # $ obj2yaml simple.wasm -o simple.yaml
+ yaml_path = "simple.yaml"
+ yaml_base, _ = os.path.splitext(yaml_path)
obj_path = self.getBuildArtifact(yaml_base)
self.yaml2obj(yaml_path, obj_path)
- self.server.responder = MyResponder(obj_path)
+ # Create a fake call stack.
+ call_stacks = [
+ WasmCallStack(
+ [WasmStackFrame(0x019C), WasmStackFrame(0x01E5), WasmStackFrame(0x01FE)]
+ ),
+ ]
+
+ # Create fake memory for our wasm locals.
+ self.memory = FakeMemory(0x10000, 0x20000)
+ self.memory.store_bytes(
+ WASM_LOCAL_ADDR,
+ bytes.fromhex(
+ "0000000000000000020000000100000000000000020000000100000000000000"
+ ),
+ )
+
+ self.server.responder = MyResponder(
+ obj_path, "test_wasm", call_stacks, self.memory
+ )
target = self.dbg.CreateTarget("")
+ breakpoint = target.BreakpointCreateByName("add")
process = self.connect(target, "wasm")
lldbutil.expect_state_changes(
self, self.dbg.GetListener(), process, [lldb.eStateStopped]
)
+ location = breakpoint.GetLocationAtIndex(0)
+ self.assertTrue(location and location.IsEnabled(), VALID_BREAKPOINT_LOCATION)
+
num_modules = target.GetNumModules()
self.assertEqual(1, num_modules)
- module = target.GetModuleAtIndex(0)
- num_sections = module.GetNumSections()
- self.assertEqual(5, num_sections)
-
- code_section = module.GetSectionAtIndex(0)
- self.assertEqual("code", code_section.GetName())
- self.assertEqual(
- load_address | code_section.GetFileOffset(),
- code_section.GetLoadAddress(target),
- )
-
- debug_info_section = module.GetSectionAtIndex(1)
- self.assertEqual(".debug_info", debug_info_section.GetName())
- self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_info_section.GetLoadAddress(target)
- )
-
- debug_abbrev_section = module.GetSectionAtIndex(2)
- self.assertEqual(".debug_abbrev", debug_abbrev_section.GetName())
- self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_abbrev_section.GetLoadAddress(target)
- )
-
- debug_line_section = module.GetSectionAtIndex(3)
- self.assertEqual(".debug_line", debug_line_section.GetName())
- self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_line_section.GetLoadAddress(target)
- )
-
- debug_str_section = module.GetSectionAtIndex(4)
- self.assertEqual(".debug_str", debug_str_section.GetName())
- self.assertEqual(
- LLDB_INVALID_ADDRESS, debug_line_section.GetLoadAddress(target)
- )
-
thread = process.GetThreadAtIndex(0)
self.assertTrue(thread.IsValid())
- frame = thread.GetFrameAtIndex(0)
- self.assertTrue(frame.IsValid())
- self.assertEqual(frame.GetPC(), 0x40000000000001B3)
-
- frame = thread.GetFrameAtIndex(1)
- self.assertTrue(frame.IsValid())
- self.assertEqual(frame.GetPC(), 0x40000000000001FE)
+ # Check that our frames match our fake call stack.
+ frame0 = thread.GetFrameAtIndex(0)
+ self.assertTrue(frame0.IsValid())
+ self.assertEqual(frame0.GetPC(), LOAD_ADDRESS | 0x019C)
+ self.assertIn("add", frame0.GetFunctionName())
+
+ frame1 = thread.GetFrameAtIndex(1)
+ self.assertTrue(frame1.IsValid())
+ self.assertEqual(frame1.GetPC(), LOAD_ADDRESS | 0x01E5)
+ self.assertIn("main", frame1.GetFunctionName())
+
+ # Check that we can resolve local variables.
+ a = frame0.FindVariable("a")
+ self.assertTrue(a.IsValid())
+ self.assertEqual(a.GetValueAsUnsigned(), 1)
+
+ b = frame0.FindVariable("b")
+ self.assertTrue(b.IsValid())
+ self.assertEqual(b.GetValueAsUnsigned(), 2)
diff --git a/lldb/test/API/functionalities/gdb_remote_client/simple.c b/lldb/test/API/functionalities/gdb_remote_client/simple.c
new file mode 100644
index 0000000..62ca1fe
--- /dev/null
+++ b/lldb/test/API/functionalities/gdb_remote_client/simple.c
@@ -0,0 +1,10 @@
+int add(int a, int b) {
+ // Break here
+ return a + b;
+}
+
+int main() {
+ int i = 1;
+ int j = 2;
+ return add(i, j);
+}
diff --git a/lldb/test/API/functionalities/gdb_remote_client/simple.yaml b/lldb/test/API/functionalities/gdb_remote_client/simple.yaml
new file mode 100644
index 0000000..cf1b7d8
--- /dev/null
+++ b/lldb/test/API/functionalities/gdb_remote_client/simple.yaml
@@ -0,0 +1,228 @@
+--- !WASM
+FileHeader:
+ Version: 0x1
+Sections:
+ - Type: TYPE
+ Signatures:
+ - Index: 0
+ ParamTypes: []
+ ReturnTypes: []
+ - Index: 1
+ ParamTypes:
+ - I32
+ - I32
+ ReturnTypes:
+ - I32
+ - Index: 2
+ ParamTypes: []
+ ReturnTypes:
+ - I32
+ - Type: FUNCTION
+ FunctionTypes: [ 0, 1, 2, 1 ]
+ - Type: TABLE
+ Tables:
+ - Index: 0
+ ElemType: FUNCREF
+ Limits:
+ Flags: [ HAS_MAX ]
+ Minimum: 0x1
+ Maximum: 0x1
+ - Type: MEMORY
+ Memories:
+ - Minimum: 0x2
+ - Type: GLOBAL
+ Globals:
+ - Index: 0
+ Type: I32
+ Mutable: true
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 66560
+ - Index: 1
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 1024
+ - Index: 2
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 1024
+ - Index: 3
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 1024
+ - Index: 4
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 66560
+ - Index: 5
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 1024
+ - Index: 6
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 66560
+ - Index: 7
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 131072
+ - Index: 8
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 0
+ - Index: 9
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 1
+ - Index: 10
+ Type: I32
+ Mutable: false
+ InitExpr:
+ Opcode: I32_CONST
+ Value: 65536
+ - Type: EXPORT
+ Exports:
+ - Name: memory
+ Kind: MEMORY
+ Index: 0
+ - Name: __wasm_call_ctors
+ Kind: FUNCTION
+ Index: 0
+ - Name: add
+ Kind: FUNCTION
+ Index: 1
+ - Name: __original_main
+ Kind: FUNCTION
+ Index: 2
+ - Name: main
+ Kind: FUNCTION
+ Index: 3
+ - Name: __main_void
+ Kind: FUNCTION
+ Index: 2
+ - Name: __indirect_function_table
+ Kind: TABLE
+ Index: 0
+ - Name: __dso_handle
+ Kind: GLOBAL
+ Index: 1
+ - Name: __data_end
+ Kind: GLOBAL
+ Index: 2
+ - Name: __stack_low
+ Kind: GLOBAL
+ Index: 3
+ - Name: __stack_high
+ Kind: GLOBAL
+ Index: 4
+ - Name: __global_base
+ Kind: GLOBAL
+ Index: 5
+ - Name: __heap_base
+ Kind: GLOBAL
+ Index: 6
+ - Name: __heap_end
+ Kind: GLOBAL
+ Index: 7
+ - Name: __memory_base
+ Kind: GLOBAL
+ Index: 8
+ - Name: __table_base
+ Kind: GLOBAL
+ Index: 9
+ - Name: __wasm_first_page_end
+ Kind: GLOBAL
+ Index: 10
+ - Type: CODE
+ Functions:
+ - Index: 0
+ Locals: []
+ Body: 0B
+ - Index: 1
+ Locals:
+ - Type: I32
+ Count: 1
+ Body: 23808080800041106B21022002200036020C20022001360208200228020C20022802086A0F0B
+ - Index: 2
+ Locals:
+ - Type: I32
+ Count: 2
+ Body: 23808080800041106B210020002480808080002000410036020C2000410136020820004102360204200028020820002802041081808080002101200041106A24808080800020010F0B
+ - Index: 3
+ Locals: []
+ Body: 1082808080000F0B
+ - Type: CUSTOM
+ Name: .debug_abbrev
+ Payload: 011101250E1305030E10171B0E110155170000022E01110112064018030E3A0B3B0B271949133F1900000305000218030E3A0B3B0B49130000042E01110112064018030E3A0B3B0B49133F1900000534000218030E3A0B3B0B49130000062400030E3E0B0B0B000000
+ - Type: CUSTOM
+ Name: .debug_info
+ Payload: 940000000400000000000401620000001D0055000000000000000D000000000000000000000002050000002900000004ED00029F510000000101900000000302910C60000000010190000000030291085E00000001019000000000042F0000004C00000004ED00009F04000000010690000000050291080B0000000107900000000502910409000000010890000000000600000000050400
+ - Type: CUSTOM
+ Name: .debug_ranges
+ Payload: 050000002E0000002F0000007B0000000000000000000000
+ - Type: CUSTOM
+ Name: .debug_str
+ Payload: 696E74006D61696E006A0069002F55736572732F6A6F6E61732F7761736D2D6D6963726F2D72756E74696D652F70726F647563742D6D696E692F706C6174666F726D732F64617277696E2F6275696C64006164640073696D706C652E630062006100636C616E672076657273696F6E2032322E302E306769742028676974406769746875622E636F6D3A4A4465766C696567686572652F6C6C766D2D70726F6A6563742E67697420343161363839613132323834633834623632383933393461356338306264636534383733656466302900
+ - Type: CUSTOM
+ Name: .debug_line
+ Payload: 62000000040020000000010101FB0E0D0001010101000000010000010073696D706C652E6300000000000005020500000001050A0A08AE050E0658050C5805032002020001010005022F0000001705070A08BB75050E7505110658050A58050382020F000101
+ - Type: CUSTOM
+ Name: name
+ FunctionNames:
+ - Index: 0
+ Name: __wasm_call_ctors
+ - Index: 1
+ Name: add
+ - Index: 2
+ Name: __original_main
+ - Index: 3
+ Name: main
+ GlobalNames:
+ - Index: 0
+ Name: __stack_pointer
+ - Type: CUSTOM
+ Name: producers
+ Languages:
+ - Name: C11
+ Version: ''
+ Tools:
+ - Name: clang
+ Version: '22.0.0git'
+ - Type: CUSTOM
+ Name: target_features
+ Features:
+ - Prefix: USED
+ Name: bulk-memory
+ - Prefix: USED
+ Name: bulk-memory-opt
+ - Prefix: USED
+ Name: call-indirect-overlong
+ - Prefix: USED
+ Name: multivalue
+ - Prefix: USED
+ Name: mutable-globals
+ - Prefix: USED
+ Name: nontrapping-fptoint
+ - Prefix: USED
+ Name: reference-types
+ - Prefix: USED
+ Name: sign-ext
+...
diff --git a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py
index 0d06a9d..dc555dd 100644
--- a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py
+++ b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py
@@ -123,5 +123,5 @@ class TestOSPluginStepping(TestBase):
os_thread = self.get_os_thread()
self.assertTrue(os_thread.IsValid(), "The OS thread is back after continue")
self.assertIn(
- "step out", os_thread.GetStopDescription(100), "Completed step out plan"
+ "step out", os_thread.stop_description, "Completed step out plan"
)
diff --git a/lldb/test/API/functionalities/postmortem/elf-core/expr/TestExpr.py b/lldb/test/API/functionalities/postmortem/elf-core/expr/TestExpr.py
index dd03a0c..9dfc685 100644
--- a/lldb/test/API/functionalities/postmortem/elf-core/expr/TestExpr.py
+++ b/lldb/test/API/functionalities/postmortem/elf-core/expr/TestExpr.py
@@ -37,6 +37,10 @@ class CoreExprTestCase(TestBase):
self.target.EvaluateExpression("int $my_int = 5")
self.expect_expr("$my_int * 2", result_type="int", result_value="10")
+ # Try assigning the persistent variable a new value.
+ self.target.EvaluateExpression("$my_int = 55")
+ self.expect_expr("$my_int", result_type="int", result_value="55")
+
def test_context_object(self):
"""Test expression evaluation in context of an object."""
diff --git a/lldb/test/API/functionalities/postmortem/minidump-new/TestMiniDumpNew.py b/lldb/test/API/functionalities/postmortem/minidump-new/TestMiniDumpNew.py
index 8776d72..4b7d24e 100644
--- a/lldb/test/API/functionalities/postmortem/minidump-new/TestMiniDumpNew.py
+++ b/lldb/test/API/functionalities/postmortem/minidump-new/TestMiniDumpNew.py
@@ -117,7 +117,7 @@ class MiniDumpNewTestCase(TestBase):
self.assertEqual(self.process.GetNumThreads(), 1)
thread = self.process.GetThreadAtIndex(0)
self.assertStopReason(thread.GetStopReason(), lldb.eStopReasonSignal)
- stop_description = thread.GetStopDescription(256)
+ stop_description = thread.stop_description
self.assertIn("SIGSEGV", stop_description)
@skipIfLLVMTargetMissing("X86")
@@ -153,7 +153,7 @@ class MiniDumpNewTestCase(TestBase):
self.assertEqual(self.process.GetNumThreads(), 1)
thread = self.process.GetThreadAtIndex(0)
self.assertStopReason(thread.GetStopReason(), lldb.eStopReasonNone)
- stop_description = thread.GetStopDescription(256)
+ stop_description = thread.stop_description
self.assertEqual(stop_description, "")
def test_snapshot_minidump_null_exn_code(self):
@@ -164,7 +164,7 @@ class MiniDumpNewTestCase(TestBase):
self.assertEqual(self.process.GetNumThreads(), 1)
thread = self.process.GetThreadAtIndex(0)
self.assertStopReason(thread.GetStopReason(), lldb.eStopReasonNone)
- stop_description = thread.GetStopDescription(256)
+ stop_description = thread.stop_description
self.assertEqual(stop_description, "")
def check_register_unsigned(self, set, name, expected):
@@ -198,7 +198,7 @@ class MiniDumpNewTestCase(TestBase):
self.assertEqual(self.process.GetNumThreads(), 1)
thread = self.process.GetThreadAtIndex(0)
self.assertStopReason(thread.GetStopReason(), lldb.eStopReasonNone)
- stop_description = thread.GetStopDescription(256)
+ stop_description = thread.stop_description
self.assertEqual(stop_description, "")
registers = thread.GetFrameAtIndex(0).GetRegisters()
# Verify the GPR registers are all correct
@@ -261,7 +261,7 @@ class MiniDumpNewTestCase(TestBase):
self.assertEqual(self.process.GetNumThreads(), 1)
thread = self.process.GetThreadAtIndex(0)
self.assertStopReason(thread.GetStopReason(), lldb.eStopReasonNone)
- stop_description = thread.GetStopDescription(256)
+ stop_description = thread.stop_description
self.assertEqual(stop_description, "")
registers = thread.GetFrameAtIndex(0).GetRegisters()
# Verify the GPR registers are all correct
@@ -522,7 +522,7 @@ class MiniDumpNewTestCase(TestBase):
for i in range(2):
thread = self.process.GetThreadAtIndex(i)
self.assertStopReason(thread.GetStopReason(), lldb.eStopReasonSignal)
- stop_description = thread.GetStopDescription(256)
+ stop_description = thread.stop_description
self.assertIn("SIGSEGV", stop_description)
def test_breakpoint_on_minidump(self):
@@ -539,7 +539,7 @@ class MiniDumpNewTestCase(TestBase):
process = target.LoadCore(core)
self.assertTrue(process, VALID_PROCESS)
thread = process.GetThreadAtIndex(0)
- stop_reason = thread.GetStopDescription(256)
+ stop_reason = thread.stop_description
self.assertIn("breakpoint 1.1", stop_reason)
finally:
if os.path.isfile(core):
diff --git a/lldb/test/API/functionalities/postmortem/minidump/TestMiniDump.py b/lldb/test/API/functionalities/postmortem/minidump/TestMiniDump.py
index 8fe5d2a..362b219 100644
--- a/lldb/test/API/functionalities/postmortem/minidump/TestMiniDump.py
+++ b/lldb/test/API/functionalities/postmortem/minidump/TestMiniDump.py
@@ -32,7 +32,7 @@ class MiniDumpTestCase(TestBase):
self.assertEqual(self.process.GetNumThreads(), 1)
thread = self.process.GetThreadAtIndex(0)
self.assertStopReason(thread.GetStopReason(), lldb.eStopReasonException)
- stop_description = thread.GetStopDescription(256)
+ stop_description = thread.stop_description
self.assertIn("0xc0000005", stop_description)
def test_modules_in_mini_dump(self):
diff --git a/lldb/test/API/functionalities/scripted_process/stack_core_scripted_process.py b/lldb/test/API/functionalities/scripted_process/stack_core_scripted_process.py
index 736bb69..ee5ae32 100644
--- a/lldb/test/API/functionalities/scripted_process/stack_core_scripted_process.py
+++ b/lldb/test/API/functionalities/scripted_process/stack_core_scripted_process.py
@@ -204,9 +204,7 @@ class StackCoreScriptedThread(ScriptedThread):
if self.is_stopped:
if "arm64" in self.scripted_process.arch:
stop_reason["type"] = lldb.eStopReasonException
- stop_reason["data"][
- "desc"
- ] = self.corefile_thread.GetStopDescription(100)
+ stop_reason["data"]["desc"] = self.corefile_thread.stop_description
elif self.scripted_process.arch == "x86_64":
stop_reason["type"] = lldb.eStopReasonSignal
stop_reason["data"]["signal"] = signal.SIGTRAP
diff --git a/lldb/test/API/functionalities/step_scripted/TestStepScripted.py b/lldb/test/API/functionalities/step_scripted/TestStepScripted.py
index 5276369..343236a 100644
--- a/lldb/test/API/functionalities/step_scripted/TestStepScripted.py
+++ b/lldb/test/API/functionalities/step_scripted/TestStepScripted.py
@@ -41,7 +41,7 @@ class StepScriptedTestCase(TestBase):
frame = thread.GetFrameAtIndex(0)
self.assertEqual("main", frame.GetFunctionName())
- stop_desc = thread.GetStopDescription(1000)
+ stop_desc = thread.stop_description
self.assertIn("Stepping out from", stop_desc, "Got right description")
def run_until_branch_instruction(self):
@@ -153,7 +153,7 @@ class StepScriptedTestCase(TestBase):
self.assertTrue(foo_val.GetValueDidChange(), "Foo changed")
# And we should have a reasonable stop description:
- desc = thread.GetStopDescription(1000)
+ desc = thread.stop_description
self.assertIn("Stepped until foo changed", desc, "Got right stop description")
def test_stop_others_from_command(self):
diff --git a/lldb/test/API/functionalities/tail_call_frames/cross_dso/TestCrossDSOTailCalls.py b/lldb/test/API/functionalities/tail_call_frames/cross_dso/TestCrossDSOTailCalls.py
index 7c3d09b..0ca2f9e5 100644
--- a/lldb/test/API/functionalities/tail_call_frames/cross_dso/TestCrossDSOTailCalls.py
+++ b/lldb/test/API/functionalities/tail_call_frames/cross_dso/TestCrossDSOTailCalls.py
@@ -11,7 +11,6 @@ class TestCrossDSOTailCalls(TestBase):
@skipIf(compiler="clang", compiler_version=["<", "10.0"])
@skipIf(dwarf_version=["<", "4"])
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr26265")
- @expectedFailureAll(archs=["arm$", "arm64", "aarch64"], bugnumber="llvm.org/PR44561")
def test_cross_dso_tail_calls(self):
self.build()
exe = self.getBuildArtifact("a.out")
diff --git a/lldb/test/API/functionalities/tail_call_frames/cross_object/TestCrossObjectTailCalls.py b/lldb/test/API/functionalities/tail_call_frames/cross_object/TestCrossObjectTailCalls.py
index 180f4d3..b5de75e 100644
--- a/lldb/test/API/functionalities/tail_call_frames/cross_object/TestCrossObjectTailCalls.py
+++ b/lldb/test/API/functionalities/tail_call_frames/cross_object/TestCrossObjectTailCalls.py
@@ -11,7 +11,6 @@ class TestCrossObjectTailCalls(TestBase):
@skipIf(compiler="clang", compiler_version=["<", "10.0"])
@skipIf(dwarf_version=["<", "4"])
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr26265")
- @expectedFailureAll(archs=["arm$", "arm64", "aarch64"], bugnumber="llvm.org/PR44561")
def test_cross_object_tail_calls(self):
self.build()
exe = self.getBuildArtifact("a.out")
diff --git a/lldb/test/API/functionalities/tsan/multiple/TestTsanMultiple.py b/lldb/test/API/functionalities/tsan/multiple/TestTsanMultiple.py
index 435e180..aa2d1d9 100644
--- a/lldb/test/API/functionalities/tsan/multiple/TestTsanMultiple.py
+++ b/lldb/test/API/functionalities/tsan/multiple/TestTsanMultiple.py
@@ -49,7 +49,7 @@ class TsanMultipleTestCase(TestBase):
stop_description = (
self.dbg.GetSelectedTarget()
.process.GetSelectedThread()
- .GetStopDescription(100)
+ .stop_description
)
self.assertTrue(
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/Makefile b/lldb/test/API/lang/cpp/expr-definition-in-dylib/Makefile
new file mode 100644
index 0000000..82daeb1
--- /dev/null
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/Makefile
@@ -0,0 +1,6 @@
+CXX_SOURCES := main.cpp
+
+DYLIB_CXX_SOURCES := lib.cpp
+DYLIB_NAME := lib
+
+include Makefile.rules
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
new file mode 100644
index 0000000..02c34b3
--- /dev/null
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/TestExprDefinitionInDylib.py
@@ -0,0 +1,33 @@
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class ExprDefinitionInDylibTestCase(TestBase):
+ NO_DEBUG_INFO_TESTCASE = True
+
+ @skipIfWindows
+ def test(self):
+ """
+ Tests that we can call functions whose definition
+        is in a different LLDB module than its declaration.
+ """
+ self.build()
+
+ target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
+ self.assertTrue(target, VALID_TARGET)
+
+ env = self.registerSharedLibrariesWithTarget(target, ["lib"])
+
+ breakpoint = lldbutil.run_break_set_by_file_and_line(
+ self, "main.cpp", line_number("main.cpp", "return")
+ )
+
+ process = target.LaunchSimple(None, env, self.get_process_working_directory())
+
+ self.assertIsNotNone(
+ lldbutil.get_one_thread_stopped_at_breakpoint_id(self.process(), breakpoint)
+ )
+
+ self.expect_expr("f.method()", result_value="-72", result_type="int")
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp
new file mode 100644
index 0000000..ad148ce
--- /dev/null
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.cpp
@@ -0,0 +1,3 @@
+#include "lib.h"
+
+int Foo::method() { return -72; }
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h
new file mode 100644
index 0000000..9568db2
--- /dev/null
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/lib.h
@@ -0,0 +1,8 @@
+#ifndef LIB_H_IN
+#define LIB_H_IN
+
+struct Foo {
+ int method();
+};
+
+#endif // LIB_H_IN
diff --git a/lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp b/lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp
new file mode 100644
index 0000000..2fddb2b
--- /dev/null
+++ b/lldb/test/API/lang/cpp/expr-definition-in-dylib/main.cpp
@@ -0,0 +1,6 @@
+#include "lib.h"
+
+int main() {
+ Foo f;
+ return f.method();
+}
diff --git a/lldb/test/API/macosx/abort_with_payload/TestAbortWithPayload.py b/lldb/test/API/macosx/abort_with_payload/TestAbortWithPayload.py
index c10d958..a850908 100644
--- a/lldb/test/API/macosx/abort_with_payload/TestAbortWithPayload.py
+++ b/lldb/test/API/macosx/abort_with_payload/TestAbortWithPayload.py
@@ -61,7 +61,7 @@ class TestAbortWithPayload(TestBase):
self.assertEqual(thread, sel_thread, "Selected the original thread")
# Make sure the stop reason is right:
self.assertEqual(
- thread.GetStopDescription(100),
+ thread.stop_description,
"abort with payload or reason",
"Description was right",
)
diff --git a/lldb/test/API/macosx/corefile-exception-reason/TestCorefileExceptionReason.py b/lldb/test/API/macosx/corefile-exception-reason/TestCorefileExceptionReason.py
index ada74a1..e452bb5 100644
--- a/lldb/test/API/macosx/corefile-exception-reason/TestCorefileExceptionReason.py
+++ b/lldb/test/API/macosx/corefile-exception-reason/TestCorefileExceptionReason.py
@@ -44,7 +44,7 @@ class TestCorefileExceptionReason(TestBase):
self.runCmd("fr v")
self.assertEqual(
- thread.GetStopDescription(256), "ESR_EC_DABORT_EL0 (fault address: 0x0)"
+ thread.stop_description, "ESR_EC_DABORT_EL0 (fault address: 0x0)"
)
if self.TraceOn():
diff --git a/lldb/test/API/riscv/break-undecoded/TestBreakpointIllegal.py b/lldb/test/API/riscv/break-undecoded/TestBreakpointIllegal.py
index 41e8901..5b00298 100644
--- a/lldb/test/API/riscv/break-undecoded/TestBreakpointIllegal.py
+++ b/lldb/test/API/riscv/break-undecoded/TestBreakpointIllegal.py
@@ -17,7 +17,7 @@ class TestBreakpointIllegal(TestBase):
)
self.runCmd("thread step-inst")
# we need to step more, as some compilers do not set appropriate debug info.
- while cur_thread.GetStopDescription(256) == "instruction step into":
+ while cur_thread.stop_description == "instruction step into":
self.runCmd("thread step-inst")
# The stop reason of the thread should be illegal opcode.
self.expect(
@@ -34,7 +34,7 @@ class TestBreakpointIllegal(TestBase):
)
self.runCmd("thread step-inst")
# we need to step more, as some compilers do not set appropriate debug info.
- while cur_thread.GetStopDescription(256) == "instruction step into":
+ while cur_thread.stop_description == "instruction step into":
self.runCmd("thread step-inst")
# The stop reason of the thread should be illegal opcode.
self.expect(
diff --git a/lldb/test/Shell/Recognizer/Inputs/ubsan_add_overflow.c b/lldb/test/Shell/Recognizer/Inputs/ubsan_add_overflow.c
new file mode 100644
index 0000000..9f12c32
--- /dev/null
+++ b/lldb/test/Shell/Recognizer/Inputs/ubsan_add_overflow.c
@@ -0,0 +1,3 @@
+#include <limits.h>
+
+int main() { return INT_MAX + 1; }
diff --git a/lldb/test/Shell/Recognizer/ubsan_add_overflow.test b/lldb/test/Shell/Recognizer/ubsan_add_overflow.test
new file mode 100644
index 0000000..a5e95cf
--- /dev/null
+++ b/lldb/test/Shell/Recognizer/ubsan_add_overflow.test
@@ -0,0 +1,22 @@
+# UNSUPPORTED: system-windows
+
+# RUN: %clang_host -g -O0 %S/Inputs/ubsan_add_overflow.c -o %t.out \
+# RUN: -fsanitize=signed-integer-overflow -fsanitize-trap=signed-integer-overflow
+
+# RUN: %lldb -b -s %s %t.out | FileCheck %s
+
+run
+# CHECK: thread #{{.*}} stop reason = Undefined Behavior Sanitizer: Integer addition overflowed
+# CHECK-NEXT: frame #1: {{.*}}`main at ubsan_add_overflow.c
+
+bt
+# CHECK: frame #0: {{.*}}`__clang_trap_msg$Undefined Behavior Sanitizer$Integer addition overflowed{{.*}}
+# CHECK: frame #1: {{.*}}`main at ubsan_add_overflow.c
+
+frame info
+# CHECK: frame #{{.*}}`main at ubsan_add_overflow.c
+
+frame recognizer info 0
+# CHECK: frame 0 is recognized by Verbose Trap StackFrame Recognizer
+
+quit
diff --git a/lldb/test/Shell/Scripts/TestFrameworkFixScript.test b/lldb/test/Shell/Scripts/TestFrameworkFixScript.test
index 2b1818e..183ea3a 100644
--- a/lldb/test/Shell/Scripts/TestFrameworkFixScript.test
+++ b/lldb/test/Shell/Scripts/TestFrameworkFixScript.test
@@ -1,6 +1,6 @@
# Create a temp dir for output and run the framework fix script on the truncated version of SBAddress.h in the inputs dir.
RUN: mkdir -p %t/Outputs
-RUN: %python %p/../../../scripts/framework-header-fix.py -f lldb_main -i %p/Inputs/Main/SBAddress.h -o %t/Outputs/SBAddress.h -p /usr/bin/unifdef --unifdef_guards USWIG
+RUN: %python %p/../../../scripts/framework-header-fix.py -f lldb_main -i %p/Inputs/Main/SBAddress.h -o %t/Outputs/SBAddress.h -p /usr/bin/unifdef --unifdef_guards SWIG
# Check the output
RUN: cat %t/Outputs/SBAddress.h | FileCheck %s
diff --git a/lldb/test/Shell/Scripts/TestFrameworkFixUnifdef.test b/lldb/test/Shell/Scripts/TestFrameworkFixUnifdef.test
index ba18b4b..a4fffe4 100644
--- a/lldb/test/Shell/Scripts/TestFrameworkFixUnifdef.test
+++ b/lldb/test/Shell/Scripts/TestFrameworkFixUnifdef.test
@@ -1,7 +1,7 @@
# REQUIRES: system-darwin
# Create a temp dir for output and run the framework fix script on the truncated version of SBAddress.h in the inputs dir.
RUN: mkdir -p %t/Outputs
-RUN: %python %p/../../../scripts/framework-header-fix.py -f lldb_main -i %p/Inputs/Main/SBAddress.h -o %t/Outputs/SBAddress.h -p /usr/bin/unifdef --unifdef_guards USWIG
+RUN: %python %p/../../../scripts/framework-header-fix.py -f lldb_main -i %p/Inputs/Main/SBAddress.h -o %t/Outputs/SBAddress.h -p /usr/bin/unifdef --unifdef_guards SWIG
# Check the output
RUN: cat %t/Outputs/SBAddress.h | FileCheck %s
diff --git a/lldb/test/Shell/Scripts/TestRPCFrameworkFixScript.test b/lldb/test/Shell/Scripts/TestRPCFrameworkFixScript.test
index e2080ca..d7775c2 100644
--- a/lldb/test/Shell/Scripts/TestRPCFrameworkFixScript.test
+++ b/lldb/test/Shell/Scripts/TestRPCFrameworkFixScript.test
@@ -1,6 +1,6 @@
# Create a temp dir for output and run the framework fix script on the truncated version of SBAddress.h in the inputs dir.
RUN: mkdir -p %t/Outputs
-RUN: %python %p/../../../scripts/framework-header-fix.py -f lldb_rpc -i %p/Inputs/RPC/RPCSBAddress.h -o %t/Outputs/RPCSBAddress.h -p /usr/bin/unifdef --unifdef_guards USWIG
+RUN: %python %p/../../../scripts/framework-header-fix.py -f lldb_rpc -i %p/Inputs/RPC/RPCSBAddress.h -o %t/Outputs/RPCSBAddress.h -p /usr/bin/unifdef --unifdef_guards SWIG
# Check the output
RUN: cat %t/Outputs/RPCSBAddress.h | FileCheck %s
diff --git a/lldb/tools/lldb-rpc/LLDBRPCHeaders.cmake b/lldb/tools/lldb-rpc/LLDBRPCHeaders.cmake
index 6c363f4..2376e23 100644
--- a/lldb/tools/lldb-rpc/LLDBRPCHeaders.cmake
+++ b/lldb/tools/lldb-rpc/LLDBRPCHeaders.cmake
@@ -79,7 +79,7 @@ function(FixIncludePaths in subfolder out)
add_custom_command(OUTPUT ${parked_header}
COMMAND ${LLDB_SOURCE_DIR}/scripts/framework-header-fix.py
- -f lldb_rpc -i ${in} -o ${parked_header} -p ${unifdef_EXECUTABLE} --unifdef_guards USWIG
+ -f lldb_rpc -i ${in} -o ${parked_header} -p ${unifdef_EXECUTABLE} --unifdef_guards SWIG
DEPENDS ${in}
COMMENT "Fixing includes in ${in}"
)
diff --git a/lldb/unittests/Core/DumpDataExtractorTest.cpp b/lldb/unittests/Core/DumpDataExtractorTest.cpp
index 3d1e8bc..6302f1e 100644
--- a/lldb/unittests/Core/DumpDataExtractorTest.cpp
+++ b/lldb/unittests/Core/DumpDataExtractorTest.cpp
@@ -163,6 +163,9 @@ TEST_F(DumpDataExtractorTest, Formats) {
TestDump(0xcafef00d, lldb::Format::eFormatHex, "0xcafef00d");
TestDump(0xcafef00d, lldb::Format::eFormatHexUppercase, "0xCAFEF00D");
TestDump(0.456, lldb::Format::eFormatFloat, "0.45600000000000002");
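+  // The two 64-bit words (least-significant first) of 4.27 encoded as an
+  // IEEE-754 binary128 value.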
+ TestDump(std::vector<uint64_t>{0x47ae147ae147ae14, 0x40011147ae147ae1},
+ lldb::Format::eFormatFloat128,
+ "4.26999999999999999999999999999999963");
TestDump(9, lldb::Format::eFormatOctal, "011");
// Chars packed into an integer.
TestDump<uint32_t>(0x4C4C4442, lldb::Format::eFormatOSType, "'LLDB'");
@@ -388,6 +391,9 @@ TEST_F(DumpDataExtractorTest, ItemByteSizeErrors) {
TestDumpWithItemByteSize(
18, lldb::Format::eFormatFloat,
"error: unsupported byte size (18) for float format");
+ TestDumpWithItemByteSize(
+ 17, lldb::Format::eFormatFloat128,
+ "error: unsupported byte size (17) for float format");
// We want sizes to exactly match one of float/double.
TestDumpWithItemByteSize(
diff --git a/lldb/unittests/Expression/CMakeLists.txt b/lldb/unittests/Expression/CMakeLists.txt
index 533cdc6..4c58b3c 100644
--- a/lldb/unittests/Expression/CMakeLists.txt
+++ b/lldb/unittests/Expression/CMakeLists.txt
@@ -4,6 +4,7 @@ add_lldb_unittest(ExpressionTests
DiagnosticManagerTest.cpp
DWARFExpressionTest.cpp
CppModuleConfigurationTest.cpp
+ ExpressionTest.cpp
LINK_LIBS
lldbCore
diff --git a/lldb/unittests/Expression/ExpressionTest.cpp b/lldb/unittests/Expression/ExpressionTest.cpp
new file mode 100644
index 0000000..12f6dd5
--- /dev/null
+++ b/lldb/unittests/Expression/ExpressionTest.cpp
@@ -0,0 +1,122 @@
+//===-- ExpressionTest.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "TestingSupport/TestUtilities.h"
+#include "lldb/Expression/Expression.h"
+#include "llvm/Testing/Support/Error.h"
+
+using namespace lldb_private;
+
+struct LabelTestCase {
+ llvm::StringRef encoded;
+ FunctionCallLabel label;
+ llvm::SmallVector<llvm::StringRef> error_pattern;
+};
+
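+// A label has the form "$__lldb_func:<module_id>:<symbol_id>:<lookup_name>".
+// The cases below cover malformed prefixes, unparsable IDs, and well-formed
+// labels that must round-trip.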
+static LabelTestCase g_label_test_cases[] = {
+ // Failure modes
+ {"bar:0x0:0x0:_Z3foov",
+ {},
+ {"expected function call label prefix '$__lldb_func' but found 'bar' "
+ "instead."}},
+ {"$__lldb_func :0x0:0x0:_Z3foov",
+ {},
+ {"expected function call label prefix '$__lldb_func' but found "
+ "'$__lldb_func ' instead."}},
+ {"$__lldb_funcc:0x0:0x0:_Z3foov",
+ {},
+ {"expected function call label prefix '$__lldb_func' but found "
+ "'$__lldb_funcc' instead."}},
+ {"", {}, {"malformed function call label."}},
+ {"foo", {}, {"malformed function call label."}},
+ {"$__lldb_func", {}, {"malformed function call label."}},
+ {"$__lldb_func:", {}, {"malformed function call label."}},
+ {"$__lldb_func:0x0:0x0", {}, {"malformed function call label."}},
+ {"$__lldb_func:abc:0x0:_Z3foov",
+ {},
+ {"failed to parse module ID from 'abc'."}},
+ {"$__lldb_func:-1:0x0:_Z3foov",
+ {},
+ {"failed to parse module ID from '-1'."}},
+ {"$__lldb_func:0x0invalid:0x0:_Z3foov",
+ {},
+ {"failed to parse module ID from '0x0invalid'."}},
+ {"$__lldb_func:0x0 :0x0:_Z3foov",
+ {},
+ {"failed to parse module ID from '0x0 '."}},
+ {"$__lldb_func:0x0:abc:_Z3foov",
+ {},
+ {"failed to parse symbol ID from 'abc'."}},
+ {"$__lldb_func:0x5:-1:_Z3foov",
+ {},
+ {"failed to parse symbol ID from '-1'."}},
+ {"$__lldb_func:0x5:0x0invalid:_Z3foov",
+ {},
+ {"failed to parse symbol ID from '0x0invalid'."}},
+ {"$__lldb_func:0x5:0x0 :_Z3foov",
+ {},
+ {"failed to parse symbol ID from '0x0 '."}},
+ {"$__lldb_func:0x0:0x0:_Z3foov",
+ {
+ /*.module_id=*/0x0,
+ /*.symbol_id=*/0x0,
+ /*.lookup_name=*/"_Z3foov",
+ },
+ {}},
+ {"$__lldb_func:0x0:0x0:abc:def:::a",
+ {
+ /*.module_id=*/0x0,
+ /*.symbol_id=*/0x0,
+ /*.lookup_name=*/"abc:def:::a",
+ },
+ {}},
+ {"$__lldb_func:0xd2:0xf0:$__lldb_func",
+ {
+ /*.module_id=*/0xd2,
+ /*.symbol_id=*/0xf0,
+ /*.lookup_name=*/"$__lldb_func",
+ },
+ {}},
+};
+
+struct ExpressionTestFixture : public testing::TestWithParam<LabelTestCase> {};
+
+TEST_P(ExpressionTestFixture, FunctionCallLabel) {
+ const auto &[encoded, label, errors] = GetParam();
+
+ auto decoded_or_err = FunctionCallLabel::fromString(encoded);
+ if (!errors.empty()) {
+ EXPECT_THAT_EXPECTED(
+ decoded_or_err,
+ llvm::FailedWithMessageArray(testing::ElementsAreArray(errors)));
+ return;
+ }
+
+ EXPECT_THAT_EXPECTED(decoded_or_err, llvm::Succeeded());
+
+ auto label_str = label.toString();
+ EXPECT_EQ(decoded_or_err->toString(), encoded);
+ EXPECT_EQ(label_str, encoded);
+
+ EXPECT_EQ(decoded_or_err->module_id, label.module_id);
+ EXPECT_EQ(decoded_or_err->symbol_id, label.symbol_id);
+ EXPECT_EQ(decoded_or_err->lookup_name, label.lookup_name);
+
+ auto roundtrip_or_err = FunctionCallLabel::fromString(label_str);
+ EXPECT_THAT_EXPECTED(roundtrip_or_err, llvm::Succeeded());
+
+ EXPECT_EQ(roundtrip_or_err->module_id, label.module_id);
+ EXPECT_EQ(roundtrip_or_err->symbol_id, label.symbol_id);
+ EXPECT_EQ(roundtrip_or_err->lookup_name, label.lookup_name);
+}
+
+INSTANTIATE_TEST_SUITE_P(FunctionCallLabelTest, ExpressionTestFixture,
+ testing::ValuesIn(g_label_test_cases));
diff --git a/lldb/unittests/Host/FileSystemTest.cpp b/lldb/unittests/Host/FileSystemTest.cpp
index 58887f6..e3c2b5a 100644
--- a/lldb/unittests/Host/FileSystemTest.cpp
+++ b/lldb/unittests/Host/FileSystemTest.cpp
@@ -186,7 +186,7 @@ TEST(FileSystemTest, FileAndDirectoryComponents) {
}
static IntrusiveRefCntPtr<DummyFileSystem> GetSimpleDummyFS() {
- IntrusiveRefCntPtr<DummyFileSystem> D(new DummyFileSystem());
+ auto D = makeIntrusiveRefCnt<DummyFileSystem>();
D->addRegularFile("/foo");
D->addDirectory("/bar");
D->addSymlink("/baz");
diff --git a/lldb/unittests/Symbol/TestTypeSystemClang.cpp b/lldb/unittests/Symbol/TestTypeSystemClang.cpp
index 71930ab..b993b82 100644
--- a/lldb/unittests/Symbol/TestTypeSystemClang.cpp
+++ b/lldb/unittests/Symbol/TestTypeSystemClang.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
+#include "llvm/IR/GlobalValue.h"
#include "gtest/gtest.h"
using namespace clang;
@@ -76,6 +77,8 @@ TEST_F(TestTypeSystemClang, TestGetBasicTypeFromEnum) {
context.getComplexType(context.FloatTy)));
EXPECT_TRUE(
context.hasSameType(GetBasicQualType(eBasicTypeHalf), context.HalfTy));
+ EXPECT_TRUE(context.hasSameType(GetBasicQualType(eBasicTypeFloat128),
+ context.Float128Ty));
EXPECT_TRUE(
context.hasSameType(GetBasicQualType(eBasicTypeInt), context.IntTy));
EXPECT_TRUE(context.hasSameType(GetBasicQualType(eBasicTypeInt128),
@@ -869,7 +872,7 @@ TEST_F(TestTypeSystemClang, TestFunctionTemplateConstruction) {
CompilerType clang_type = m_ast->CreateFunctionType(int_type, {}, false, 0U);
FunctionDecl *func = m_ast->CreateFunctionDeclaration(
TU, OptionalClangModuleID(), "foo", clang_type, StorageClass::SC_None,
- false);
+ false, /*asm_label=*/{});
TypeSystemClang::TemplateParameterInfos empty_params;
// Create the actual function template.
@@ -900,7 +903,7 @@ TEST_F(TestTypeSystemClang, TestFunctionTemplateInRecordConstruction) {
// 2. It is mirroring the behavior of DWARFASTParserClang::ParseSubroutine.
FunctionDecl *func = m_ast->CreateFunctionDeclaration(
TU, OptionalClangModuleID(), "foo", clang_type, StorageClass::SC_None,
- false);
+ false, /*asm_label=*/{});
TypeSystemClang::TemplateParameterInfos empty_params;
// Create the actual function template.
@@ -938,7 +941,7 @@ TEST_F(TestTypeSystemClang, TestDeletingImplicitCopyCstrDueToMoveCStr) {
bool is_attr_used = false;
bool is_artificial = false;
m_ast->AddMethodToCXXRecordType(
- t.GetOpaqueQualType(), class_name, nullptr, function_type,
+ t.GetOpaqueQualType(), class_name, /*asm_label=*/{}, function_type,
lldb::AccessType::eAccessPublic, is_virtual, is_static, is_inline,
is_explicit, is_attr_used, is_artificial);
@@ -975,7 +978,7 @@ TEST_F(TestTypeSystemClang, TestNotDeletingUserCopyCstrDueToMoveCStr) {
CompilerType function_type = m_ast->CreateFunctionType(
return_type, args, /*variadic=*/false, /*quals*/ 0U);
m_ast->AddMethodToCXXRecordType(
- t.GetOpaqueQualType(), class_name, nullptr, function_type,
+ t.GetOpaqueQualType(), class_name, /*asm_label=*/{}, function_type,
lldb::AccessType::eAccessPublic, is_virtual, is_static, is_inline,
is_explicit, is_attr_used, is_artificial);
}
@@ -987,7 +990,7 @@ TEST_F(TestTypeSystemClang, TestNotDeletingUserCopyCstrDueToMoveCStr) {
m_ast->CreateFunctionType(return_type, args,
/*variadic=*/false, /*quals*/ 0U);
m_ast->AddMethodToCXXRecordType(
- t.GetOpaqueQualType(), class_name, nullptr, function_type,
+ t.GetOpaqueQualType(), class_name, /*asm_label=*/{}, function_type,
lldb::AccessType::eAccessPublic, is_virtual, is_static, is_inline,
is_explicit, is_attr_used, is_artificial);
}
@@ -1098,7 +1101,7 @@ TEST_F(TestTypeSystemClang, AddMethodToCXXRecordType_ParmVarDecls) {
m_ast->CreateFunctionType(return_type, param_types,
/*variadic=*/false, /*quals*/ 0U);
m_ast->AddMethodToCXXRecordType(
- t.GetOpaqueQualType(), "myFunc", nullptr, function_type,
+ t.GetOpaqueQualType(), "myFunc", /*asm_label=*/{}, function_type,
lldb::AccessType::eAccessPublic, is_virtual, is_static, is_inline,
is_explicit, is_attr_used, is_artificial);
@@ -1116,3 +1119,130 @@ TEST_F(TestTypeSystemClang, AddMethodToCXXRecordType_ParmVarDecls) {
EXPECT_EQ(method_it->getParamDecl(0)->getDeclContext(), *method_it);
EXPECT_EQ(method_it->getParamDecl(1)->getDeclContext(), *method_it);
}
+
+TEST_F(TestTypeSystemClang, AsmLabel_CtorDtor) {
+ // Tests TypeSystemClang::DeclGetMangledName for constructors/destructors
+ // with and without AsmLabels.
+
+ llvm::StringRef class_name = "S";
+ CompilerType t = clang_utils::createRecord(*m_ast, class_name);
+ m_ast->StartTagDeclarationDefinition(t);
+
+ CompilerType return_type = m_ast->GetBasicType(lldb::eBasicTypeVoid);
+ const bool is_virtual = false;
+ const bool is_static = false;
+ const bool is_inline = false;
+ const bool is_explicit = true;
+ const bool is_attr_used = false;
+ const bool is_artificial = false;
+
+ CompilerType function_type =
+ m_ast->CreateFunctionType(return_type, {},
+ /*variadic=*/false, /*quals*/ 0U);
+ auto *ctor_nolabel = m_ast->AddMethodToCXXRecordType(
+ t.GetOpaqueQualType(), "S", /*asm_label=*/{}, function_type,
+ lldb::AccessType::eAccessPublic, is_virtual, is_static, is_inline,
+ is_explicit, is_attr_used, is_artificial);
+
+ auto *dtor_nolabel = m_ast->AddMethodToCXXRecordType(
+ t.GetOpaqueQualType(), "~S", /*asm_label=*/{}, function_type,
+ lldb::AccessType::eAccessPublic, is_virtual, is_static, is_inline,
+ is_explicit, is_attr_used, is_artificial);
+
+ auto *ctor = m_ast->AddMethodToCXXRecordType(
+ t.GetOpaqueQualType(), "S", /*asm_label=*/"$__lldb_func:0x0:0x0:S",
+ function_type, lldb::AccessType::eAccessPublic, is_virtual, is_static,
+ is_inline, is_explicit, is_attr_used, is_artificial);
+
+ auto *dtor = m_ast->AddMethodToCXXRecordType(
+ t.GetOpaqueQualType(), "~S", /*asm_label=*/"$__lldb_func:0x0:0x0:~S",
+ function_type, lldb::AccessType::eAccessPublic, is_virtual, is_static,
+ is_inline, is_explicit, is_attr_used, is_artificial);
+
+ m_ast->CompleteTagDeclarationDefinition(t);
+
+ ASSERT_TRUE(ctor_nolabel);
+ ASSERT_TRUE(dtor_nolabel);
+ ASSERT_TRUE(ctor);
+ ASSERT_TRUE(dtor);
+
+#ifdef _WIN32
+ EXPECT_STREQ(m_ast->DeclGetMangledName(ctor_nolabel).GetCString(),
+ "??0S@@QEAA@XZ");
+ EXPECT_STREQ(m_ast->DeclGetMangledName(dtor_nolabel).GetCString(),
+ "??_DS@@QEAAXXZ");
+#else
+ EXPECT_STREQ(m_ast->DeclGetMangledName(ctor_nolabel).GetCString(),
+ "_ZN1SC1Ev");
+ EXPECT_STREQ(m_ast->DeclGetMangledName(dtor_nolabel).GetCString(),
+ "_ZN1SD1Ev");
+#endif
+
+ EXPECT_STREQ(llvm::GlobalValue::dropLLVMManglingEscape(
+ m_ast->DeclGetMangledName(ctor).GetStringRef())
+ .data(),
+ "$__lldb_func:0x0:0x0:S");
+ EXPECT_STREQ(llvm::GlobalValue::dropLLVMManglingEscape(
+ m_ast->DeclGetMangledName(dtor).GetStringRef())
+ .data(),
+ "$__lldb_func:0x0:0x0:~S");
+}
+
+struct AsmLabelTestCase {
+ llvm::StringRef mangled;
+ llvm::StringRef expected;
+};
+
+class TestTypeSystemClangAsmLabel
+ : public testing::TestWithParam<AsmLabelTestCase> {
+public:
+ SubsystemRAII<FileSystem, HostInfo> subsystems;
+
+ void SetUp() override {
+ m_holder =
+ std::make_unique<clang_utils::TypeSystemClangHolder>("test ASTContext");
+ m_ast = m_holder->GetAST();
+ }
+
+ void TearDown() override {
+ m_ast = nullptr;
+ m_holder.reset();
+ }
+
+protected:
+ TypeSystemClang *m_ast = nullptr;
+ std::unique_ptr<clang_utils::TypeSystemClangHolder> m_holder;
+};
+
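+// DeclGetMangledName should unwrap a function call label to its lookup name
+// only when that name is itself a mangled name; every other spelling is
+// returned untouched.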
+static AsmLabelTestCase g_asm_label_test_cases[] = {
+ {/*mangled=*/"$__lldb_func:0x0:0x0:_Z3foov",
+ /*expected=*/"_Z3foov"},
+ {/*mangled=*/"$__lldb_func:0x0:0x0:foo",
+ /*expected=*/"$__lldb_func:0x0:0x0:foo"},
+ {/*mangled=*/"foo",
+ /*expected=*/"foo"},
+ {/*mangled=*/"_Z3foov",
+ /*expected=*/"_Z3foov"},
+ {/*mangled=*/"$__lldb_func:",
+ /*expected=*/"$__lldb_func:"},
+};
+
+TEST_P(TestTypeSystemClangAsmLabel, DeclGetMangledName) {
+ const auto &[mangled, expected] = GetParam();
+
+ CompilerType int_type = m_ast->GetBasicType(lldb::eBasicTypeInt);
+ clang::TranslationUnitDecl *TU = m_ast->GetTranslationUnitDecl();
+
+ // Prepare the declarations/types we need for the template.
+ CompilerType clang_type = m_ast->CreateFunctionType(int_type, {}, false, 0U);
+ FunctionDecl *func = m_ast->CreateFunctionDeclaration(
+ TU, OptionalClangModuleID(), "foo", clang_type, StorageClass::SC_None,
+ false, /*asm_label=*/mangled);
+
+ ASSERT_EQ(llvm::GlobalValue::dropLLVMManglingEscape(
+ m_ast->DeclGetMangledName(func).GetStringRef()),
+ expected);
+}
+
+INSTANTIATE_TEST_SUITE_P(AsmLabelTests, TestTypeSystemClangAsmLabel,
+ testing::ValuesIn(g_asm_label_test_cases));
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index e874d76..4c70b98 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -149,7 +149,7 @@ endforeach()
# As we migrate runtimes to using the bootstrapping build, the set of default runtimes
# should grow as we remove those runtimes from LLVM_ENABLE_PROJECTS above.
set(LLVM_DEFAULT_RUNTIMES "libcxx;libcxxabi;libunwind")
-set(LLVM_SUPPORTED_RUNTIMES "libc;libunwind;libcxxabi;libcxx;compiler-rt;openmp;llvm-libgcc;offload;flang-rt;libclc")
+set(LLVM_SUPPORTED_RUNTIMES "libc;libunwind;libcxxabi;libcxx;compiler-rt;openmp;llvm-libgcc;offload;flang-rt;libclc;libsycl")
set(LLVM_ENABLE_RUNTIMES "" CACHE STRING
"Semicolon-separated list of runtimes to build, or \"all\" (${LLVM_DEFAULT_RUNTIMES}). Supported runtimes are ${LLVM_SUPPORTED_RUNTIMES}.")
if(LLVM_ENABLE_RUNTIMES STREQUAL "all")
diff --git a/llvm/docs/CommandGuide/lit.rst b/llvm/docs/CommandGuide/lit.rst
index 938b7f9..eb90e95 100644
--- a/llvm/docs/CommandGuide/lit.rst
+++ b/llvm/docs/CommandGuide/lit.rst
@@ -356,6 +356,11 @@ The timing data is stored in the `test_exec_root` in a file named
primary purpose is to suppress an ``XPASS`` result without modifying a test
case that uses the ``XFAIL`` directive.
+.. option:: --exclude-xfail
+
+  ``XFAIL`` tests are not run unless they are listed via the ``--xfail-not``
+  option (or the ``LIT_XFAIL_NOT`` environment variable).
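+
+  For example (the test name here is illustrative)::
+
+    lit --exclude-xfail --xfail-not "suite :: flaky.test" path/to/tests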
+
.. option:: --num-shards M
Divide the set of selected tests into ``M`` equal-sized subsets or
diff --git a/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst b/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst
index 2e199a0..31ead45 100644
--- a/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst
+++ b/llvm/docs/HowToCrossCompileBuiltinsOnArm.rst
@@ -14,117 +14,113 @@ targets are welcome.
The instructions in this document depend on libraries and programs external to
LLVM; there are many ways to install and configure these dependencies, so you
-may need to adapt the instructions here to fit your own local situation.
+may need to adapt the instructions here to fit your own situation.
Prerequisites
=============
-In this use case we'll be using cmake on a Debian-based Linux system,
-cross-compiling from an x86_64 host to a hard-float Armv7-A target. We'll be
+In this use case we will be using cmake on a Debian-based Linux system,
+cross-compiling from an x86_64 host to a hard-float Armv7-A target. We will be
using as many of the LLVM tools as we can, but it is possible to use GNU
equivalents.
- * ``A build of LLVM/clang for the llvm-tools and llvm-config``
- * ``A clang executable with support for the ARM target``
- * ``compiler-rt sources``
- * ``The qemu-arm user mode emulator``
- * ``An arm-linux-gnueabihf sysroot``
+You will need:
+ * A build of LLVM for the llvm-tools and ``llvm-config``.
+ * A clang executable with support for the ``ARM`` target.
+ * compiler-rt sources.
+ * The ``qemu-arm`` user mode emulator.
+ * An ``arm-linux-gnueabihf`` sysroot.
-In this example we will be using ninja.
+In this example we will be using ``ninja`` as the build tool.
-See https://compiler-rt.llvm.org/ for more information about the dependencies
+See https://compiler-rt.llvm.org/ for information about the dependencies
on clang and LLVM.
See https://llvm.org/docs/GettingStarted.html for information about obtaining
-the source for LLVM and compiler-rt. Note that the getting started guide
-places compiler-rt in the projects subdirectory, but this is not essential and
-if you are using the BaremetalARM.cmake cache for v6-M, v7-M and v7-EM then
-compiler-rt must be placed in the runtimes directory.
+the source for LLVM and compiler-rt.
``qemu-arm`` should be available as a package for your Linux distribution.
-The most complicated of the prerequisites to satisfy is the arm-linux-gnueabihf
+The most complicated of the prerequisites to satisfy is the ``arm-linux-gnueabihf``
sysroot. In theory it is possible to use the Linux distribution's multiarch
support to fulfill the dependencies for building, but unfortunately due to
-/usr/local/include being added some host includes are selected. The easiest way
-to supply a sysroot is to download the arm-linux-gnueabihf toolchain. This can
-be found at:
-* https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads for gcc 8 and above
-* https://releases.linaro.org/components/toolchain/binaries/ for gcc 4.9 to 7.3
+``/usr/local/include`` being added, some host includes are selected.
+
+The easiest way to supply a sysroot is to download an ``arm-linux-gnueabihf``
+toolchain from https://developer.arm.com/open-source/gnu-toolchain/gnu-a/downloads.
Building compiler-rt builtins for Arm
=====================================
+
We will be doing a standalone build of compiler-rt using the following cmake
-options.
-
-* ``path/to/compiler-rt``
-* ``-G Ninja``
-* ``-DCMAKE_AR=/path/to/llvm-ar``
-* ``-DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_ASM_FLAGS="build-c-flags"``
-* ``-DCMAKE_C_COMPILER=/path/to/clang``
-* ``-DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_C_FLAGS="build-c-flags"``
-* ``-DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=lld"``
-* ``-DCMAKE_NM=/path/to/llvm-nm``
-* ``-DCMAKE_RANLIB=/path/to/llvm-ranlib``
-* ``-DCOMPILER_RT_BUILD_BUILTINS=ON``
-* ``-DCOMPILER_RT_BUILD_LIBFUZZER=OFF``
-* ``-DCOMPILER_RT_BUILD_MEMPROF=OFF``
-* ``-DCOMPILER_RT_BUILD_PROFILE=OFF``
-* ``-DCOMPILER_RT_BUILD_SANITIZERS=OFF``
-* ``-DCOMPILER_RT_BUILD_XRAY=OFF``
-* ``-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON``
-* ``-DLLVM_CONFIG_PATH=/path/to/llvm-config``
+options::
+
+ cmake path/to/compiler-rt \
+ -G Ninja \
+ -DCMAKE_AR=/path/to/llvm-ar \
+ -DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf" \
+ -DCMAKE_ASM_FLAGS="build-c-flags" \
+ -DCMAKE_C_COMPILER=/path/to/clang \
+ -DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf" \
+ -DCMAKE_C_FLAGS="build-c-flags" \
+ -DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=lld" \
+ -DCMAKE_NM=/path/to/llvm-nm \
+ -DCMAKE_RANLIB=/path/to/llvm-ranlib \
+ -DCOMPILER_RT_BUILD_BUILTINS=ON \
+ -DCOMPILER_RT_BUILD_LIBFUZZER=OFF \
+ -DCOMPILER_RT_BUILD_MEMPROF=OFF \
+ -DCOMPILER_RT_BUILD_PROFILE=OFF \
+ -DCOMPILER_RT_BUILD_SANITIZERS=OFF \
+ -DCOMPILER_RT_BUILD_XRAY=OFF \
+ -DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON \
+ -DLLVM_CONFIG_PATH=/path/to/llvm-config
The ``build-c-flags`` need to be sufficient to pass the cmake compiler check,
compile compiler-rt, and if you are running the tests, compile and link the
tests. When cross-compiling with clang we will need to pass sufficient
-information to generate code for the Arm architecture we are targeting. We will
-need to select the Arm target, select the Armv7-A architecture and choose
-between using Arm or Thumb.
-instructions. For example:
+information to generate code for the Arm architecture we are targeting.
-* ``--target=arm-linux-gnueabihf``
-* ``-march=armv7a``
-* ``-mthumb``
+We will need to select:
+ * The Arm target and Armv7-A architecture with ``--target=arm-linux-gnueabihf -march=armv7a``.
+ * Whether to generate Arm (the default) or Thumb instructions (``-mthumb``).
-When using a GCC arm-linux-gnueabihf toolchain the following flags are
+When using a GCC ``arm-linux-gnueabihf`` toolchain the following flags are
needed to pick up the includes and libraries:
-* ``--gcc-toolchain=/path/to/dir/toolchain``
-* ``--sysroot=/path/to/toolchain/arm-linux-gnueabihf/libc``
+ * ``--gcc-toolchain=/path/to/dir/toolchain``
+ * ``--sysroot=/path/to/toolchain/arm-linux-gnueabihf/libc``
In this example we will be adding all of the command line options to both
``CMAKE_C_FLAGS`` and ``CMAKE_ASM_FLAGS``. There are cmake flags to pass some of
-these options individually which can be used to simplify the ``build-c-flags``:
+these options individually which can be used to simplify the ``build-c-flags``::
-* ``-DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"``
-* ``-DCMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN=/path/to/dir/toolchain``
-* ``-DCMAKE_SYSROOT=/path/to/dir/toolchain/arm-linux-gnueabihf/libc``
+ -DCMAKE_C_COMPILER_TARGET="arm-linux-gnueabihf"
+ -DCMAKE_ASM_COMPILER_TARGET="arm-linux-gnueabihf"
+ -DCMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN=/path/to/dir/toolchain
+ -DCMAKE_SYSROOT=/path/to/dir/toolchain/arm-linux-gnueabihf/libc
Once cmake has completed, the builtins can be built with ``ninja builtins``.
Testing compiler-rt builtins using qemu-arm
===========================================
+
To test the builtins library we need to add a few more cmake flags to enable
testing and set up the compiler and flags for the test cases. We must also tell
-cmake that we wish to run the tests on ``qemu-arm``.
+cmake that we wish to run the tests on ``qemu-arm``::
-* ``-DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armhf/sysroot``
-* ``-DCOMPILER_RT_INCLUDE_TESTS=ON``
-* ``-DCOMPILER_RT_TEST_COMPILER="/path/to/clang"``
-* ``-DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"``
+ -DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armhf/sysroot"
+ -DCOMPILER_RT_INCLUDE_TESTS=ON
+ -DCOMPILER_RT_TEST_COMPILER="/path/to/clang"
+ -DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"
The ``/path/to/armhf/sysroot`` should be the same as the one passed to
-``--sysroot`` in the "build-c-flags".
+``--sysroot`` in the ``build-c-flags``.
-The "test-c-flags" need to include the target, architecture, gcc-toolchain,
-sysroot and arm/thumb state. The additional cmake defines such as
+The ``test-c-flags`` need to include the target, architecture, gcc-toolchain,
+sysroot and Arm/Thumb state. The additional cmake defines such as
``CMAKE_C_COMPILER_EXTERNAL_TOOLCHAIN`` do not apply when building the tests. If
-you have put all of these in "build-c-flags" then these can be repeated. If you
-wish to use lld to link the tests then add ``"-fuse-ld=lld``.
+you have put all of these in ``build-c-flags`` then these can be repeated. If you
+wish to use lld to link the tests then add ``-fuse-ld=lld``.
Once cmake has completed, the tests can be built and run using
``ninja check-builtins``.
@@ -142,19 +138,21 @@ This stage can often fail at link time if the ``--sysroot=`` and
``CMAKE_C_FLAGS`` and ``CMAKE_C_COMPILER_TARGET`` flags.
It can be useful to build a simple example outside of cmake with your toolchain
-to make sure it is working. For example: ``clang --target=arm-linux-gnueabi -march=armv7a --gcc-toolchain=/path/to/gcc-toolchain --sysroot=/path/to/gcc-toolchain/arm-linux-gnueabihf/libc helloworld.c``
+to make sure it is working. For example::
+
+ clang --target=arm-linux-gnueabi -march=armv7a --gcc-toolchain=/path/to/gcc-toolchain --sysroot=/path/to/gcc-toolchain/arm-linux-gnueabihf/libc helloworld.c
Clang uses the host header files
--------------------------------
On Debian-based systems it is possible to install multiarch support for
-arm-linux-gnueabi and arm-linux-gnueabihf. In many cases clang can successfully
+``arm-linux-gnueabi`` and ``arm-linux-gnueabihf``. In many cases clang can successfully
use this multiarch support when ``--gcc-toolchain=`` and ``--sysroot=`` are not supplied.
Unfortunately clang adds ``/usr/local/include`` before
``/usr/include/arm-linux-gnueabihf``, leading to errors when compiling the host's
header files.
The multiarch support is not sufficient to build the builtins; you will need to
-use a separate arm-linux-gnueabihf toolchain.
+use a separate ``arm-linux-gnueabihf`` toolchain.
No target passed to clang
-------------------------
@@ -164,12 +162,13 @@ as ``error: unknown directive .syntax unified``.
You can check the clang invocation in the error message to see if there is no
``--target`` or if it is set incorrectly. The cause is usually
-``CMAKE_ASM_FLAGS`` not containing ``--target`` or ``CMAKE_ASM_COMPILER_TARGET`` not being present.
+``CMAKE_ASM_FLAGS`` not containing ``--target`` or ``CMAKE_ASM_COMPILER_TARGET``
+not being present.
Arm architecture not given
--------------------------
-The ``--target=arm-linux-gnueabihf`` will default to arm architecture v4t which
-cannot assemble the barrier instructions used in the synch_and_fetch source
+The ``--target=arm-linux-gnueabihf`` will default to Arm architecture v4t, which
+cannot assemble the barrier instructions used in the ``synch_and_fetch`` source
files.
The cause is usually a missing ``-march=armv7a`` from the ``CMAKE_ASM_FLAGS``.
@@ -202,7 +201,7 @@ may need extra c-flags such as ``-mfloat-abi=softfp`` for use of floating-point
instructions, and ``-mfloat-abi=soft -mfpu=none`` for software floating-point
emulation.
-You will need to use an arm-linux-gnueabi GNU toolchain for soft-float.
+You will need to use an ``arm-linux-gnueabi`` GNU toolchain for soft-float.
AArch64 Target
--------------
@@ -220,8 +219,12 @@ Armv6-m, Armv7-m and Armv7E-M targets
To build and test the libraries using a similar method to Armv7-A is possible
but more difficult. The main problems are:
-* There isn't a ``qemu-arm`` user-mode emulator for bare-metal systems. The ``qemu-system-arm`` can be used but this is significantly more difficult to setup.
-* The targets to compile compiler-rt have the suffix -none-eabi. This uses the BareMetal driver in clang and by default won't find the libraries needed to pass the cmake compiler check.
+* There is not a ``qemu-arm`` user-mode emulator for bare-metal systems.
+ ``qemu-system-arm`` can be used but this is significantly more difficult
+  to set up.
+* The targets to compile compiler-rt have the suffix ``-none-eabi``. This uses
+ the BareMetal driver in clang and by default will not find the libraries
+ needed to pass the cmake compiler check.
As the Armv6-M, Armv7-M and Armv7E-M builds of compiler-rt only use instructions
that are supported on Armv7-A, we can still get most of the value of running the
@@ -233,32 +236,30 @@ builtins use instructions that are supported on Armv7-A but not Armv6-M,
Armv7-M and Armv7E-M.
To get the cmake compile test to pass you will need to pass the libraries
-needed to successfully link the cmake test via ``CMAKE_CFLAGS``. It is
-strongly recommended that you use version 3.6 or above of cmake so you can use
-``CMAKE_TRY_COMPILE_TARGET=STATIC_LIBRARY`` to skip the link step.
-
-* ``-DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY``
-* ``-DCOMPILER_RT_OS_DIR="baremetal"``
-* ``-DCOMPILER_RT_BUILD_BUILTINS=ON``
-* ``-DCOMPILER_RT_BUILD_SANITIZERS=OFF``
-* ``-DCOMPILER_RT_BUILD_XRAY=OFF``
-* ``-DCOMPILER_RT_BUILD_LIBFUZZER=OFF``
-* ``-DCOMPILER_RT_BUILD_PROFILE=OFF``
-* ``-DCMAKE_C_COMPILER=${host_install_dir}/bin/clang``
-* ``-DCMAKE_C_COMPILER_TARGET="your *-none-eabi target"``
-* ``-DCMAKE_ASM_COMPILER_TARGET="your *-none-eabi target"``
-* ``-DCMAKE_AR=/path/to/llvm-ar``
-* ``-DCMAKE_NM=/path/to/llvm-nm``
-* ``-DCMAKE_RANLIB=/path/to/llvm-ranlib``
-* ``-DCOMPILER_RT_BAREMETAL_BUILD=ON``
-* ``-DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON``
-* ``-DLLVM_CONFIG_PATH=/path/to/llvm-config``
-* ``-DCMAKE_C_FLAGS="build-c-flags"``
-* ``-DCMAKE_ASM_FLAGS="build-c-flags"``
-* ``-DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armv7-A/sysroot"``
-* ``-DCOMPILER_RT_INCLUDE_TESTS=ON``
-* ``-DCOMPILER_RT_TEST_COMPILER="/path/to/clang"``
-* ``-DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"``
+needed to successfully link the cmake test via ``CMAKE_CFLAGS``::
+
+ -DCMAKE_TRY_COMPILE_TARGET_TYPE=STATIC_LIBRARY \
+ -DCOMPILER_RT_OS_DIR="baremetal" \
+ -DCOMPILER_RT_BUILD_BUILTINS=ON \
+ -DCOMPILER_RT_BUILD_SANITIZERS=OFF \
+ -DCOMPILER_RT_BUILD_XRAY=OFF \
+ -DCOMPILER_RT_BUILD_LIBFUZZER=OFF \
+ -DCOMPILER_RT_BUILD_PROFILE=OFF \
+ -DCMAKE_C_COMPILER=${host_install_dir}/bin/clang \
+ -DCMAKE_C_COMPILER_TARGET="your *-none-eabi target" \
+ -DCMAKE_ASM_COMPILER_TARGET="your *-none-eabi target" \
+ -DCMAKE_AR=/path/to/llvm-ar \
+ -DCMAKE_NM=/path/to/llvm-nm \
+ -DCMAKE_RANLIB=/path/to/llvm-ranlib \
+ -DCOMPILER_RT_BAREMETAL_BUILD=ON \
+ -DCOMPILER_RT_DEFAULT_TARGET_ONLY=ON \
+ -DLLVM_CONFIG_PATH=/path/to/llvm-config \
+ -DCMAKE_C_FLAGS="build-c-flags" \
+ -DCMAKE_ASM_FLAGS="build-c-flags" \
+ -DCOMPILER_RT_EMULATOR="qemu-arm -L /path/to/armv7-A/sysroot" \
+ -DCOMPILER_RT_INCLUDE_TESTS=ON \
+ -DCOMPILER_RT_TEST_COMPILER="/path/to/clang" \
+ -DCOMPILER_RT_TEST_COMPILER_CFLAGS="test-c-flags"
The Armv6-M builtins will use the soft-float ABI. When compiling the tests for
Armv7-A we must include ``"-mthumb -mfloat-abi=soft -mfpu=none"`` in the
@@ -267,25 +268,21 @@ test-c-flags. We must use an Armv7-A soft-float abi sysroot for ``qemu-arm``.
Depending on the linker used for the test cases you may encounter BuildAttribute
mismatches between the M-profile objects from compiler-rt and the A-profile
objects from the test. The lld linker does not check the profile
-BuildAttribute so it can be used to link the tests by adding -fuse-ld=lld to the
+BuildAttribute so it can be used to link the tests by adding ``-fuse-ld=lld`` to the
``COMPILER_RT_TEST_COMPILER_CFLAGS``.
Alternative using a cmake cache
-------------------------------
If you wish to build, but not test compiler-rt for Armv6-M, Armv7-M or Armv7E-M
-the easiest way is to use the BaremetalARM.cmake recipe in clang/cmake/caches.
-
-You will need a bare metal sysroot such as that provided by the GNU ARM
-Embedded toolchain.
-
-The libraries can be built with the cmake options:
+the easiest way is to use the ``BaremetalARM.cmake`` recipe in ``clang/cmake/caches``.
-* ``-DBAREMETAL_ARMV6M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
-* ``-DBAREMETAL_ARMV7M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
-* ``-DBAREMETAL_ARMV7EM_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi``
-* ``-C /path/to/llvm/source/tools/clang/cmake/caches/BaremetalARM.cmake``
-* ``/path/to/llvm``
+You will need a bare metal sysroot such as that provided by the GNU ARM Embedded
+toolchain.
-**Note** that for the recipe to work the compiler-rt source must be checked out
-into the directory llvm/runtimes. You will also need clang and lld checked out.
+The libraries can be built with the cmake options::
+ -DBAREMETAL_ARMV6M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi \
+ -DBAREMETAL_ARMV7M_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi \
+ -DBAREMETAL_ARMV7EM_SYSROOT=/path/to/bare/metal/toolchain/arm-none-eabi \
+ -C /path/to/llvm/source/tools/clang/cmake/caches/BaremetalARM.cmake \
+ /path/to/llvm
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index 021f321..0c49fc8 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -137,6 +137,9 @@ Changes to the LLVM tools
Changes to LLDB
---------------------------------
+* LLDB can now set breakpoints, show backtraces, and display variables when
+ debugging Wasm with supported runtimes (WAMR and V8).
+
Changes to BOLT
---------------------------------
diff --git a/llvm/docs/SPIRVUsage.rst b/llvm/docs/SPIRVUsage.rst
index 1f563fb..fdefc53 100644
--- a/llvm/docs/SPIRVUsage.rst
+++ b/llvm/docs/SPIRVUsage.rst
@@ -131,9 +131,23 @@ Extensions
The SPIR-V backend supports a variety of `extensions <https://github.com/KhronosGroup/SPIRV-Registry/tree/main/extensions>`_
that enable or enhance features beyond the core SPIR-V specification.
-These extensions can be enabled using the ``-spirv-extensions`` option
-followed by the name of the extension(s) you wish to enable. Below is a
-list of supported SPIR-V extensions, sorted alphabetically by their extension names:
+The enabled extensions can be controlled using the ``-spirv-ext`` option followed by a list of
+extensions to enable or disable, each prefixed with ``+`` or ``-``, respectively.
+
+To enable multiple extensions, list them separated by commas. For example, to enable support for atomic operations on floating-point numbers and arbitrary-precision integers, use:
+
+``-spirv-ext=+SPV_EXT_shader_atomic_float_add,+SPV_INTEL_arbitrary_precision_integers``
+
+To enable all extensions, use the following option:
+``-spirv-ext=all``
+
+To enable all KHR extensions, use the following option:
+``-spirv-ext=khr``
+
+To enable all extensions except specific ones, specify ``all`` followed by a list of the disallowed extensions. For example:
+``-spirv-ext=all,-SPV_INTEL_arbitrary_precision_integers``
+
+Below is a list of supported SPIR-V extensions, sorted alphabetically by their extension names:
.. list-table:: Supported SPIR-V Extensions
:widths: 50 150
@@ -220,16 +234,6 @@ list of supported SPIR-V extensions, sorted alphabetically by their extension na
* - ``SPV_KHR_float_controls2``
- Adds ability to specify the floating-point environment in shaders. It can be used on whole modules and individual instructions.
-To enable multiple extensions, list them separated by comma. For example, to enable support for atomic operations on floating-point numbers and arbitrary precision integers, use:
-
-``-spirv-ext=+SPV_EXT_shader_atomic_float_add,+SPV_INTEL_arbitrary_precision_integers``
-
-To enable all extensions, use the following option:
-``-spirv-ext=all``
-
-To enable all extensions except specified, specify ``all`` followed by a list of disallowed extensions. For example:
-``-spirv-ext=all,-SPV_INTEL_arbitrary_precision_integers``
-
SPIR-V representation in LLVM IR
================================
diff --git a/llvm/include/llvm/ADT/Any.h b/llvm/include/llvm/ADT/Any.h
index 88dbce9..a29aaa3 100644
--- a/llvm/include/llvm/ADT/Any.h
+++ b/llvm/include/llvm/ADT/Any.h
@@ -119,7 +119,6 @@ private:
template <class T> friend T any_cast(Any &&Value);
template <class T> friend const T *any_cast(const Any *Value);
template <class T> friend T *any_cast(Any *Value);
- template <typename T> friend bool any_isa(const Any &Value);
std::unique_ptr<StorageBase> Storage;
};
diff --git a/llvm/include/llvm/Analysis/DXILResource.h b/llvm/include/llvm/Analysis/DXILResource.h
index 956dcbc..93c6bfb 100644
--- a/llvm/include/llvm/Analysis/DXILResource.h
+++ b/llvm/include/llvm/Analysis/DXILResource.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Frontend/HLSL/HLSLBinding.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/PassManager.h"
@@ -633,86 +634,25 @@ LLVM_ABI ModulePass *createDXILResourceWrapperPassPass();
// register slots to resources with implicit bindings, and in a
// post-optimization validation pass that will raise diagnostics about
// overlapping bindings.
-//
-// For example for these resource bindings:
-//
-// RWBuffer<float> A[10] : register(u3);
-// RWBuffer<float> B[] : register(u5, space2)
-//
-// The analysis result for UAV binding type will look like this:
-//
-// UAVSpaces {
-// ResClass = ResourceClass::UAV,
-// Spaces = {
-// { Space = 0, FreeRanges = {{ 0, 2 }, { 13, UINT32_MAX }} },
-// { Space = 2, FreeRanges = {{ 0, 4 }} }
-// }
-// }
-//
class DXILResourceBindingInfo {
-public:
- struct BindingRange {
- uint32_t LowerBound;
- uint32_t UpperBound;
- BindingRange(uint32_t LB, uint32_t UB) : LowerBound(LB), UpperBound(UB) {}
- };
-
- struct RegisterSpace {
- uint32_t Space;
- SmallVector<BindingRange> FreeRanges;
- RegisterSpace(uint32_t Space) : Space(Space) {
- FreeRanges.emplace_back(0, UINT32_MAX);
- }
- // Size == -1 means unbounded array
- LLVM_ABI std::optional<uint32_t> findAvailableBinding(int32_t Size);
- };
-
- struct BindingSpaces {
- dxil::ResourceClass RC;
- llvm::SmallVector<RegisterSpace> Spaces;
- BindingSpaces(dxil::ResourceClass RC) : RC(RC) {}
- LLVM_ABI RegisterSpace &getOrInsertSpace(uint32_t Space);
- };
-
-private:
- BindingSpaces SRVSpaces, UAVSpaces, CBufferSpaces, SamplerSpaces;
- bool ImplicitBinding;
- bool OverlappingBinding;
+ hlsl::BindingInfo Bindings;
+ bool HasImplicitBinding = false;
+ bool HasOverlappingBinding = false;
// Populate the resource binding info given explicit resource binding calls
// in the module.
void populate(Module &M, DXILResourceTypeMap &DRTM);
public:
- DXILResourceBindingInfo()
- : SRVSpaces(dxil::ResourceClass::SRV),
- UAVSpaces(dxil::ResourceClass::UAV),
- CBufferSpaces(dxil::ResourceClass::CBuffer),
- SamplerSpaces(dxil::ResourceClass::Sampler), ImplicitBinding(false),
- OverlappingBinding(false) {}
-
- bool hasImplicitBinding() const { return ImplicitBinding; }
- void setHasImplicitBinding(bool Value) { ImplicitBinding = Value; }
- bool hasOverlappingBinding() const { return OverlappingBinding; }
-
- BindingSpaces &getBindingSpaces(dxil::ResourceClass RC) {
- switch (RC) {
- case dxil::ResourceClass::SRV:
- return SRVSpaces;
- case dxil::ResourceClass::UAV:
- return UAVSpaces;
- case dxil::ResourceClass::CBuffer:
- return CBufferSpaces;
- case dxil::ResourceClass::Sampler:
- return SamplerSpaces;
- }
+ bool hasImplicitBinding() const { return HasImplicitBinding; }
+ void setHasImplicitBinding(bool Value) { HasImplicitBinding = Value; }
+ bool hasOverlappingBinding() const { return HasOverlappingBinding; }
+ void setHasOverlappingBinding(bool Value) { HasOverlappingBinding = Value; }
- llvm_unreachable("Invalid resource class");
- }
-
- // Size == -1 means unbounded array
LLVM_ABI std::optional<uint32_t>
- findAvailableBinding(dxil::ResourceClass RC, uint32_t Space, int32_t Size);
+ findAvailableBinding(dxil::ResourceClass RC, uint32_t Space, int32_t Size) {
+ return Bindings.findAvailableBinding(RC, Space, Size);
+ }
friend class DXILResourceBindingAnalysis;
friend class DXILResourceBindingWrapperPass;
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index bff7707..011d599 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -91,6 +91,10 @@ inline bind_ty<const SCEVUnknown> m_SCEVUnknown(const SCEVUnknown *&V) {
return V;
}
+inline bind_ty<const SCEVAddExpr> m_scev_Add(const SCEVAddExpr *&V) {
+ return V;
+}
+
/// Match a specified const SCEV *.
struct specificscev_ty {
const SCEV *Expr;
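A minimal sketch of how the new ``m_scev_Add`` binder reads at a call site
(assumes a ``const SCEV *Expr`` in scope; illustrative, not part of the patch):

    using namespace llvm::SCEVPatternMatch;

    const SCEVAddExpr *Add = nullptr;
    if (match(Expr, m_scev_Add(Add))) {
      // Add is bound to the whole SCEVAddExpr; its operands can then be
      // inspected via Add->operands().
    }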
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 7928835..aa4550d 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1950,6 +1950,10 @@ public:
const Function &F,
SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const;
+  /// Returns true if GEP may be used to index into vectors for this
+  /// target.
+ LLVM_ABI bool allowVectorElementIndexingUsingGEP() const;
+
private:
std::unique_ptr<const TargetTransformInfoImplBase> TTIImpl;
};
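A sketch of the intended use of the new hook; the caller-side guard and the
fallback strategy here are illustrative assumptions (the default
implementation below returns true):

    // Illustrative: gate GEP-based vector element addressing on the target.
    if (TTI.allowVectorElementIndexingUsingGEP()) {
      // Emit a getelementptr into the vector as before.
    } else {
      // Lower element accesses with insertelement/extractelement instead.
    }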
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 2ea87b3..abdbca0 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -1145,6 +1145,8 @@ public:
const Function &F,
SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {}
+ virtual bool allowVectorElementIndexingUsingGEP() const { return true; }
+
protected:
// Obtain the minimum required size to hold the value (without the sign)
// In case of a vector it returns the min required size for one element.
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index b55c4e0..6781cd5 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -633,6 +633,9 @@ public:
return true;
}
+ /// Return true if this group is full, i.e. it has no gaps.
+ bool isFull() const { return getNumMembers() == getFactor(); }
+
private:
uint32_t Factor; // Interleave Factor.
bool Reverse;
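A sketch of the new predicate at a call site (``Group`` as an
``InterleaveGroup`` pointer is an assumption for illustration):

    // Illustrative: a full group covers every lane, so no gap handling.
    if (Group->isFull()) {
      // getNumMembers() == getFactor(); no masking or peeling needed.
    } else {
      // The group has gaps and may need masked loads or an epilogue.
    }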
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
index ad35d7f..749971e 100644
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -973,7 +973,10 @@ enum : unsigned {
// SM based processor values.
EF_CUDA_SM100 = 0x6400,
+ EF_CUDA_SM101 = 0x6500,
+ EF_CUDA_SM103 = 0x6700,
EF_CUDA_SM120 = 0x7800,
+ EF_CUDA_SM121 = 0x7900,
// Set when using an accelerator variant like sm_100a.
EF_CUDA_ACCELERATORS = 0x8,
diff --git a/llvm/include/llvm/Bitstream/BitstreamWriter.h b/llvm/include/llvm/Bitstream/BitstreamWriter.h
index 78f5eb4..5f53681 100644
--- a/llvm/include/llvm/Bitstream/BitstreamWriter.h
+++ b/llvm/include/llvm/Bitstream/BitstreamWriter.h
@@ -466,7 +466,7 @@ private:
EmitCode(Abbrev);
- unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos());
+ unsigned i = 0, e = Abbv->getNumOperandInfos();
if (Code) {
assert(e && "Expected non-empty abbreviation");
const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++);
@@ -632,8 +632,7 @@ private:
void EncodeAbbrev(const BitCodeAbbrev &Abbv) {
EmitCode(bitc::DEFINE_ABBREV);
EmitVBR(Abbv.getNumOperandInfos(), 5);
- for (unsigned i = 0, e = static_cast<unsigned>(Abbv.getNumOperandInfos());
- i != e; ++i) {
+ for (unsigned i = 0, e = Abbv.getNumOperandInfos(); i != e; ++i) {
const BitCodeAbbrevOp &Op = Abbv.getOperandInfo(i);
Emit(Op.isLiteral(), 1);
if (Op.isLiteral()) {
diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h
index faab2503..91c0142 100644
--- a/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -190,6 +190,36 @@ private:
/// Emit comments in assembly output if this is true.
bool VerboseAsm;
+ /// Store symbols and type identifiers used to create callgraph section
+ /// entries related to a function.
+ struct FunctionInfo {
+ /// Numeric type identifier used in callgraph section for indirect calls
+ /// and targets.
+ using CGTypeId = uint64_t;
+
+ /// Enumeration of function kinds, and their mapping to function kind values
+ /// stored in callgraph section entries.
+ /// Must match the enum in llvm/tools/llvm-objdump/llvm-objdump.cpp.
+ enum class FunctionKind : uint64_t {
+      /// Function cannot be a target of indirect calls.
+ NOT_INDIRECT_TARGET = 0,
+
+      /// Function may be a target of indirect calls but its type id is unknown.
+ INDIRECT_TARGET_UNKNOWN_TID = 1,
+
+      /// Function may be a target of indirect calls and its type id is known.
+ INDIRECT_TARGET_KNOWN_TID = 2,
+ };
+
+ /// Map type identifiers to callsite labels. Labels are generated for each
+ /// indirect callsite in the function.
+ SmallVector<std::pair<CGTypeId, MCSymbol *>> CallSiteLabels;
+ };
+
+ enum CallGraphSectionFormatVersion : uint64_t {
+ V_0 = 0,
+ };
+
/// Output stream for the stack usage file (i.e., .su file).
std::unique_ptr<raw_fd_ostream> StackUsageStream;
@@ -355,6 +385,13 @@ public:
/// are available. Returns empty string otherwise.
StringRef getConstantSectionSuffix(const Constant *C) const;
+ /// Generate and emit labels for callees of the indirect callsites which will
+ /// be used to populate the .callgraph section.
+ void emitIndirectCalleeLabels(
+ FunctionInfo &FuncInfo,
+ const MachineFunction::CallSiteInfoMap &CallSitesInfoMap,
+ const MachineInstr &MI);
+
//===------------------------------------------------------------------===//
// XRay instrumentation implementation.
//===------------------------------------------------------------------===//
@@ -442,6 +479,8 @@ public:
void emitKCFITrapEntry(const MachineFunction &MF, const MCSymbol *Symbol);
virtual void emitKCFITypeId(const MachineFunction &MF);
+ void emitCallGraphSection(const MachineFunction &MF, FunctionInfo &FuncInfo);
+
void emitPseudoProbe(const MachineInstr &MI);
void emitRemarksSection(remarks::RemarkStreamer &RS);
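A hypothetical sketch of the per-function flow these declarations imply; the
loop structure and the use of ``MachineFunction::getCallSitesInfo()`` are
assumptions, not code from this patch:

    // Hypothetical driver: gather labels for indirect callsites, then emit
    // this function's .callgraph entries.
    FunctionInfo FuncInfo;
    for (const MachineBasicBlock &MBB : MF)
      for (const MachineInstr &MI : MBB)
        if (MI.isCall())
          emitIndirectCalleeLabels(FuncInfo, MF.getCallSitesInfo(), MI);
    emitCallGraphSection(MF, FuncInfo);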
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 938d71d..9e3d919 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -323,10 +323,11 @@ public:
const MachineFunction *getParent() const { return xParent; }
MachineFunction *getParent() { return xParent; }
- /// Returns true if the original IR terminator is an `indirectbr`. This
- /// typically corresponds to a `goto` in C, rather than jump tables.
- bool terminatorIsComputedGoto() const {
- return back().isIndirectBranch() &&
+ /// Returns true if the original IR terminator is an `indirectbr` with
+ /// successor blocks. This typically corresponds to a `goto` in C, rather than
+ /// jump tables.
+ bool terminatorIsComputedGotoWithSuccessors() const {
+ return back().isIndirectBranch() && !succ_empty() &&
llvm::all_of(successors(), [](const MachineBasicBlock *Succ) {
return Succ->isIRBlockAddressTaken();
});
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
index 7f88323..06c4daf 100644
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -517,6 +517,13 @@ public:
SmallVector<ArgRegPair, 1> ArgRegPairs;
/// Callee type ids.
SmallVector<ConstantInt *, 4> CalleeTypeIds;
+
+ CallSiteInfo() = default;
+
+    /// Extracts the numeric type id from the CallBase's callee_type
+    /// Metadata and sets CalleeTypeIds. This is used as the type id for
+    /// the indirect call in the call graph section.
+ CallSiteInfo(const CallBase &CB);
};
struct CalledGlobalInfo {
@@ -524,11 +531,12 @@ public:
unsigned TargetFlags;
};
+ using CallSiteInfoMap = DenseMap<const MachineInstr *, CallSiteInfo>;
+
private:
Delegate *TheDelegate = nullptr;
GISelChangeObserver *Observer = nullptr;
- using CallSiteInfoMap = DenseMap<const MachineInstr *, CallSiteInfo>;
/// Map a call instruction to call site arguments forwarding info.
CallSiteInfoMap CallSitesInfo;
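A sketch of what the new converting constructor enables during call lowering;
``CB``, ``CallMI``, and the ``addCallSiteInfo`` call are illustrative
assumptions:

    // Illustrative: derive type ids from the call's callee_type metadata
    // and attach them to the lowered call instruction.
    MachineFunction::CallSiteInfo CSInfo(CB);
    MF.addCallSiteInfo(CallMI, std::move(CSInfo));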
diff --git a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
index e63e77a..e705d7d9 100644
--- a/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -69,6 +69,32 @@ enum {
} // end namespace RegState
+/// Set of metadata that should be preserved when using BuildMI(). This provides
+/// a more convenient way of preserving DebugLoc, PCSections and MMRA.
+class MIMetadata {
+public:
+ MIMetadata() = default;
+ MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr)
+ : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA) {}
+ MIMetadata(const DILocation *DI, MDNode *PCSections = nullptr,
+ MDNode *MMRA = nullptr)
+ : DL(DI), PCSections(PCSections), MMRA(MMRA) {}
+ explicit MIMetadata(const Instruction &From)
+ : DL(From.getDebugLoc()),
+ PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {}
+ explicit MIMetadata(const MachineInstr &From)
+ : DL(From.getDebugLoc()), PCSections(From.getPCSections()) {}
+
+ const DebugLoc &getDL() const { return DL; }
+ MDNode *getPCSections() const { return PCSections; }
+ MDNode *getMMRAMetadata() const { return MMRA; }
+
+private:
+ DebugLoc DL;
+ MDNode *PCSections = nullptr;
+ MDNode *MMRA = nullptr;
+};
+
class MachineInstrBuilder {
MachineFunction *MF = nullptr;
MachineInstr *MI = nullptr;
@@ -317,15 +343,11 @@ public:
}
}
- const MachineInstrBuilder &setPCSections(MDNode *MD) const {
- if (MD)
- MI->setPCSections(*MF, MD);
- return *this;
- }
-
- const MachineInstrBuilder &setMMRAMetadata(MDNode *MMRA) const {
- if (MMRA)
- MI->setMMRAMetadata(*MF, MMRA);
+ const MachineInstrBuilder &copyMIMetadata(const MIMetadata &MIMD) const {
+ if (MIMD.getPCSections())
+ MI->setPCSections(*MF, MIMD.getPCSections());
+ if (MIMD.getMMRAMetadata())
+ MI->setMMRAMetadata(*MF, MIMD.getMMRAMetadata());
return *this;
}
@@ -343,38 +365,11 @@ public:
}
};
-/// Set of metadata that should be preserved when using BuildMI(). This provides
-/// a more convenient way of preserving DebugLoc, PCSections and MMRA.
-class MIMetadata {
-public:
- MIMetadata() = default;
- MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr, MDNode *MMRA = nullptr)
- : DL(std::move(DL)), PCSections(PCSections), MMRA(MMRA) {}
- MIMetadata(const DILocation *DI, MDNode *PCSections = nullptr,
- MDNode *MMRA = nullptr)
- : DL(DI), PCSections(PCSections), MMRA(MMRA) {}
- explicit MIMetadata(const Instruction &From)
- : DL(From.getDebugLoc()),
- PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {}
- explicit MIMetadata(const MachineInstr &From)
- : DL(From.getDebugLoc()), PCSections(From.getPCSections()) {}
-
- const DebugLoc &getDL() const { return DL; }
- MDNode *getPCSections() const { return PCSections; }
- MDNode *getMMRAMetadata() const { return MMRA; }
-
-private:
- DebugLoc DL;
- MDNode *PCSections = nullptr;
- MDNode *MMRA = nullptr;
-};
-
/// Builder interface. Specify how to create the initial instruction itself.
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
const MCInstrDesc &MCID) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, MIMD.getDL()))
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata());
+ .copyMIMetadata(MIMD);
}
/// This version of the builder sets up the first operand as a
@@ -382,8 +377,7 @@ inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
const MCInstrDesc &MCID, Register DestReg) {
return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, MIMD.getDL()))
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata())
+ .copyMIMetadata(MIMD)
.addReg(DestReg, RegState::Define);
}
@@ -397,10 +391,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata())
- .addReg(DestReg, RegState::Define);
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD).addReg(
+ DestReg, RegState::Define);
}
/// This version of the builder inserts the newly-built instruction before
@@ -416,10 +408,8 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata())
- .addReg(DestReg, RegState::Define);
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD).addReg(
+ DestReg, RegState::Define);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
@@ -449,9 +439,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata());
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
@@ -461,9 +449,7 @@ inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
MachineFunction &MF = *BB.getParent();
MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
BB.insert(I, MI);
- return MachineInstrBuilder(MF, MI)
- .setPCSections(MIMD.getPCSections())
- .setMMRAMetadata(MIMD.getMMRAMetadata());
+ return MachineInstrBuilder(MF, MI).copyMIMetadata(MIMD);
}
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
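At call sites nothing changes; ``MIMetadata`` still carries the metadata and
the builders now apply it in one step through ``copyMIMetadata()``. A sketch
(``OldMI``, ``MBB``, ``InsertPt``, ``TII``, and the registers are
illustrative):

    // Illustrative: DebugLoc and PCSections captured from an existing
    // instruction are propagated to the new one by BuildMI.
    MIMetadata MIMD(OldMI);
    BuildMI(MBB, InsertPt, MIMD, TII->get(TargetOpcode::COPY), DstReg)
        .addReg(SrcReg);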
diff --git a/llvm/include/llvm/CodeGen/MachineScheduler.h b/llvm/include/llvm/CodeGen/MachineScheduler.h
index efda7eb..5a2aee2 100644
--- a/llvm/include/llvm/CodeGen/MachineScheduler.h
+++ b/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -1303,8 +1303,8 @@ protected:
SchedBoundary Top;
SchedBoundary Bot;
- ClusterInfo *TopCluster;
- ClusterInfo *BotCluster;
+ unsigned TopClusterID;
+ unsigned BotClusterID;
/// Candidate last picked from Top boundary.
SchedCandidate TopCand;
@@ -1346,8 +1346,8 @@ protected:
/// Candidate last picked from Bot boundary.
SchedCandidate BotCand;
- ClusterInfo *TopCluster;
- ClusterInfo *BotCluster;
+ unsigned TopClusterID;
+ unsigned BotClusterID;
public:
PostGenericScheduler(const MachineSchedContext *C)
diff --git a/llvm/include/llvm/CodeGen/SDPatternMatch.h b/llvm/include/llvm/CodeGen/SDPatternMatch.h
index 2967532..be90250 100644
--- a/llvm/include/llvm/CodeGen/SDPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/SDPatternMatch.h
@@ -578,6 +578,18 @@ m_InsertSubvector(const LHS &Base, const RHS &Sub, const IDX &Idx) {
return TernaryOpc_match<LHS, RHS, IDX>(ISD::INSERT_SUBVECTOR, Base, Sub, Idx);
}
+template <typename LTy, typename RTy, typename TTy, typename FTy, typename CCTy>
+inline auto m_SelectCC(const LTy &L, const RTy &R, const TTy &T, const FTy &F,
+ const CCTy &CC) {
+ return m_Node(ISD::SELECT_CC, L, R, T, F, CC);
+}
+
+template <typename LTy, typename RTy, typename TTy, typename FTy, typename CCTy>
+inline auto m_SelectCCLike(const LTy &L, const RTy &R, const TTy &T,
+ const FTy &F, const CCTy &CC) {
+ return m_AnyOf(m_Select(m_SetCC(L, R, CC), T, F), m_SelectCC(L, R, T, F, CC));
+}
+
// === Binary operations ===
template <typename LHS_P, typename RHS_P, bool Commutable = false,
bool ExcludeChain = false>
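A sketch of the new matchers in a combine, assuming the existing ``sd_match``
and ``m_Value``/``m_CondCode`` binders; illustrative only:

    // Illustrative: accept select_cc(L, R, T, F, CC) and the equivalent
    // select(setcc(L, R, CC), T, F) with a single pattern.
    SDValue L, R, T, F;
    ISD::CondCode CC;
    if (sd_match(N, m_SelectCCLike(m_Value(L), m_Value(R), m_Value(T),
                                   m_Value(F), m_CondCode(CC)))) {
      // Fold both forms uniformly here.
    }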
diff --git a/llvm/include/llvm/CodeGen/ScheduleDAG.h b/llvm/include/llvm/CodeGen/ScheduleDAG.h
index 3a0a31b..122b7be 100644
--- a/llvm/include/llvm/CodeGen/ScheduleDAG.h
+++ b/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -240,6 +240,11 @@ class TargetRegisterInfo;
typedef SmallSet<SUnit *, 8> ClusterInfo;
constexpr unsigned InvalidClusterId = ~0u;
+  /// Return whether the input cluster IDs are the same and valid.
+ inline bool isTheSameCluster(unsigned A, unsigned B) {
+ return A != InvalidClusterId && A == B;
+ }
+
/// Scheduling unit. This is a node in the scheduling DAG.
class SUnit {
private:
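A sketch of the helper with the scheduler's new unsigned cluster IDs (the
candidate's ID below is an illustrative placeholder):

    // Illustrative: InvalidClusterId never compares equal, so callers need
    // no separate validity check.
    unsigned CandClusterID = InvalidClusterId; // e.g. the candidate's cluster
    if (isTheSameCluster(TopClusterID, CandClusterID)) {
      // Keep the memory-op cluster together when picking the next node.
    }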
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 8f88811..11ae8cd 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -182,7 +182,7 @@ public:
return SDValue(Node, R);
}
- /// Return true if this node is an operand of N.
+ /// Return true if the referenced return value is an operand of N.
LLVM_ABI bool isOperandOf(const SDNode *N) const;
/// Return the ValueType of the referenced return value.
diff --git a/llvm/include/llvm/Frontend/HLSL/HLSLBinding.h b/llvm/include/llvm/Frontend/HLSL/HLSLBinding.h
new file mode 100644
index 0000000..70a2eeb
--- /dev/null
+++ b/llvm/include/llvm/Frontend/HLSL/HLSLBinding.h
@@ -0,0 +1,162 @@
+//===- HLSLBinding.h - Representation for resource bindings in HLSL -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file This file contains objects to represent resource bindings.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FRONTEND_HLSL_HLSLBINDING_H
+#define LLVM_FRONTEND_HLSL_HLSLBINDING_H
+
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DXILABI.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+namespace hlsl {
+
+/// BindingInfo represents the ranges of bindings and free space for each
+/// `dxil::ResourceClass`. This can represent HLSL-level bindings as well as
+/// bindings described in root signatures, and can be used for analysis of
+/// overlapping or missing bindings as well as for finding space for implicit
+/// bindings.
+///
+/// As an example, given these resource bindings:
+///
+/// RWBuffer<float> A[10] : register(u3);
+/// RWBuffer<float> B[] : register(u5, space2)
+///
+/// The binding info for UAV bindings should look like this:
+///
+/// UAVSpaces {
+/// ResClass = ResourceClass::UAV,
+/// Spaces = {
+/// { Space = 0u, FreeRanges = {{ 0u, 2u }, { 13u, ~0u }} },
+/// { Space = 2u, FreeRanges = {{ 0u, 4u }} }
+/// }
+/// }
+class BindingInfo {
+public:
+ struct BindingRange {
+ uint32_t LowerBound;
+ uint32_t UpperBound;
+ BindingRange(uint32_t LB, uint32_t UB) : LowerBound(LB), UpperBound(UB) {}
+ };
+
+ struct RegisterSpace {
+ uint32_t Space;
+ SmallVector<BindingRange> FreeRanges;
+ RegisterSpace(uint32_t Space) : Space(Space) {
+ FreeRanges.emplace_back(0, ~0u);
+ }
+ // Size == -1 means unbounded array
+ LLVM_ABI std::optional<uint32_t> findAvailableBinding(int32_t Size);
+ };
+
+ struct BindingSpaces {
+ dxil::ResourceClass RC;
+ llvm::SmallVector<RegisterSpace> Spaces;
+ BindingSpaces(dxil::ResourceClass RC) : RC(RC) {}
+ LLVM_ABI RegisterSpace &getOrInsertSpace(uint32_t Space);
+ };
+
+private:
+ BindingSpaces SRVSpaces{dxil::ResourceClass::SRV};
+ BindingSpaces UAVSpaces{dxil::ResourceClass::UAV};
+ BindingSpaces CBufferSpaces{dxil::ResourceClass::CBuffer};
+ BindingSpaces SamplerSpaces{dxil::ResourceClass::Sampler};
+
+public:
+ BindingSpaces &getBindingSpaces(dxil::ResourceClass RC) {
+ switch (RC) {
+ case dxil::ResourceClass::SRV:
+ return SRVSpaces;
+ case dxil::ResourceClass::UAV:
+ return UAVSpaces;
+ case dxil::ResourceClass::CBuffer:
+ return CBufferSpaces;
+ case dxil::ResourceClass::Sampler:
+ return SamplerSpaces;
+ }
+
+ llvm_unreachable("Invalid resource class");
+ }
+ const BindingSpaces &getBindingSpaces(dxil::ResourceClass RC) const {
+ return const_cast<BindingInfo *>(this)->getBindingSpaces(RC);
+ }
+
+ // Size == -1 means unbounded array
+ LLVM_ABI std::optional<uint32_t>
+ findAvailableBinding(dxil::ResourceClass RC, uint32_t Space, int32_t Size);
+
+ friend class BindingInfoBuilder;
+};
+
+/// Builder class for creating a \c BindingInfo.
+class BindingInfoBuilder {
+public:
+ struct Binding {
+ dxil::ResourceClass RC;
+ uint32_t Space;
+ uint32_t LowerBound;
+ uint32_t UpperBound;
+ const void *Cookie;
+
+ Binding(dxil::ResourceClass RC, uint32_t Space, uint32_t LowerBound,
+ uint32_t UpperBound, const void *Cookie)
+ : RC(RC), Space(Space), LowerBound(LowerBound), UpperBound(UpperBound),
+ Cookie(Cookie) {}
+
+ bool isUnbounded() const { return UpperBound == ~0U; }
+
+ bool operator==(const Binding &RHS) const {
+ return std::tie(RC, Space, LowerBound, UpperBound, Cookie) ==
+ std::tie(RHS.RC, RHS.Space, RHS.LowerBound, RHS.UpperBound,
+ RHS.Cookie);
+ }
+ bool operator!=(const Binding &RHS) const { return !(*this == RHS); }
+
+ bool operator<(const Binding &RHS) const {
+ return std::tie(RC, Space, LowerBound) <
+ std::tie(RHS.RC, RHS.Space, RHS.LowerBound);
+ }
+ };
+
+private:
+ SmallVector<Binding> Bindings;
+
+public:
+ void trackBinding(dxil::ResourceClass RC, uint32_t Space, uint32_t LowerBound,
+ uint32_t UpperBound, const void *Cookie) {
+ Bindings.emplace_back(RC, Space, LowerBound, UpperBound, Cookie);
+ }
+ /// Calculate the binding info - \c ReportOverlap will be called once for each
+ /// overlapping binding.
+ BindingInfo calculateBindingInfo(
+ llvm::function_ref<void(const BindingInfoBuilder &Builder,
+ const Binding &Overlapping)>
+ ReportOverlap);
+
+ /// Calculate the binding info - \c HasOverlap will be set to indicate whether
+ /// there are any overlapping bindings.
+ BindingInfo calculateBindingInfo(bool &HasOverlap) {
+ HasOverlap = false;
+ return calculateBindingInfo(
+ [&HasOverlap](auto, auto) { HasOverlap = true; });
+ }
+
+ /// For use in the \c ReportOverlap callback of \c calculateBindingInfo -
+ /// finds a binding that the \c ReportedBinding overlaps with.
+ const Binding &findOverlapping(const Binding &ReportedBinding) const;
+};
+
+} // namespace hlsl
+} // namespace llvm
+
+#endif // LLVM_FRONTEND_HLSL_HLSLBINDING_H
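A sketch of the builder flow using only the API declared above; the register
values mirror the RWBuffer example in the class comment:

    // Illustrative: u3..u12 in space 0 (10-element array at u3) and an
    // unbounded array at u5 in space 2.
    llvm::hlsl::BindingInfoBuilder Builder;
    Builder.trackBinding(llvm::dxil::ResourceClass::UAV, /*Space=*/0,
                         /*LowerBound=*/3, /*UpperBound=*/12,
                         /*Cookie=*/nullptr);
    Builder.trackBinding(llvm::dxil::ResourceClass::UAV, /*Space=*/2,
                         /*LowerBound=*/5, /*UpperBound=*/~0u,
                         /*Cookie=*/nullptr);

    bool HasOverlap = false;
    llvm::hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
    // With no overlaps, this finds a free UAV slot in space 0 (e.g. u0).
    std::optional<uint32_t> Slot = Info.findAvailableBinding(
        llvm::dxil::ResourceClass::UAV, /*Space=*/0, /*Size=*/1);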
diff --git a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h
index 6fa51ed..0bd0774 100644
--- a/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h
+++ b/llvm/include/llvm/Frontend/HLSL/RootSignatureMetadata.h
@@ -14,6 +14,7 @@
#ifndef LLVM_FRONTEND_HLSL_ROOTSIGNATUREMETADATA_H
#define LLVM_FRONTEND_HLSL_ROOTSIGNATUREMETADATA_H
+#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/HLSL/HLSLRootSignature.h"
#include "llvm/IR/Constants.h"
#include "llvm/MC/DXContainerRootSignature.h"
@@ -26,6 +27,80 @@ class Metadata;
namespace hlsl {
namespace rootsig {
+template <typename T>
+class RootSignatureValidationError
+ : public ErrorInfo<RootSignatureValidationError<T>> {
+public:
+ static char ID;
+ StringRef ParamName;
+ T Value;
+
+ RootSignatureValidationError(StringRef ParamName, T Value)
+ : ParamName(ParamName), Value(Value) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << "Invalid value for " << ParamName << ": " << Value;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
+class GenericRSMetadataError : public ErrorInfo<GenericRSMetadataError> {
+public:
+ static char ID;
+ StringRef Message;
+ MDNode *MD;
+
+ GenericRSMetadataError(StringRef Message, MDNode *MD)
+ : Message(Message), MD(MD) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << Message;
+ if (MD) {
+ OS << "\n";
+ MD->printTree(OS);
+ }
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
+class InvalidRSMetadataFormat : public ErrorInfo<InvalidRSMetadataFormat> {
+public:
+ static char ID;
+ StringRef ElementName;
+
+ InvalidRSMetadataFormat(StringRef ElementName) : ElementName(ElementName) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << "Invalid format for " << ElementName;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
+class InvalidRSMetadataValue : public ErrorInfo<InvalidRSMetadataValue> {
+public:
+ static char ID;
+ StringRef ParamName;
+
+ InvalidRSMetadataValue(StringRef ParamName) : ParamName(ParamName) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << "Invalid value for " << ParamName;
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return llvm::inconvertibleErrorCode();
+ }
+};
+
class MetadataBuilder {
public:
MetadataBuilder(llvm::LLVMContext &Ctx, ArrayRef<RootElement> Elements)
@@ -66,29 +141,27 @@ class MetadataParser {
public:
MetadataParser(MDNode *Root) : Root(Root) {}
- LLVM_ABI bool ParseRootSignature(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD);
+ LLVM_ABI llvm::Expected<llvm::mcdxbc::RootSignatureDesc>
+ ParseRootSignature(uint32_t Version);
private:
- bool parseRootFlags(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootFlagNode);
- bool parseRootConstants(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootConstantNode);
- bool parseRootDescriptors(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootDescriptorNode,
- RootSignatureElementKind ElementKind);
- bool parseDescriptorRange(LLVMContext *Ctx, mcdxbc::DescriptorTable &Table,
- MDNode *RangeDescriptorNode);
- bool parseDescriptorTable(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *DescriptorTableNode);
- bool parseRootSignatureElement(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *Element);
- bool parseStaticSampler(LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *StaticSamplerNode);
-
- bool validateRootSignature(LLVMContext *Ctx,
- const llvm::mcdxbc::RootSignatureDesc &RSD);
+ llvm::Error parseRootFlags(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootFlagNode);
+ llvm::Error parseRootConstants(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootConstantNode);
+ llvm::Error parseRootDescriptors(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootDescriptorNode,
+ RootSignatureElementKind ElementKind);
+ llvm::Error parseDescriptorRange(mcdxbc::DescriptorTable &Table,
+ MDNode *RangeDescriptorNode);
+ llvm::Error parseDescriptorTable(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *DescriptorTableNode);
+ llvm::Error parseRootSignatureElement(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *Element);
+ llvm::Error parseStaticSampler(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *StaticSamplerNode);
+
+ llvm::Error validateRootSignature(const llvm::mcdxbc::RootSignatureDesc &RSD);
MDNode *Root;
};
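A sketch of the new Expected-based flow; the version value and the error
handling are illustrative:

    // Illustrative: failures now surface as llvm::Error values instead of
    // a bool plus diagnostics through an LLVMContext.
    hlsl::rootsig::MetadataParser Parser(RootSignatureNode);
    llvm::Expected<mcdxbc::RootSignatureDesc> RSDOrErr =
        Parser.ParseRootSignature(/*Version=*/2);
    if (!RSDOrErr) {
      llvm::logAllUnhandledErrors(RSDOrErr.takeError(), llvm::errs(),
                                  "root signature: ");
      return;
    }
    mcdxbc::RootSignatureDesc &RSD = *RSDOrErr;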
diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
index 7919f7a..ce1cedc 100644
--- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
+++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
@@ -578,8 +578,9 @@ struct DynamicAllocatorsT {
template <typename T, typename I, typename E> //
struct EnterT {
using List = ObjectListT<I, E>;
- using WrapperTrait = std::true_type;
- List v;
+ ENUM(Modifier, Automap);
+ using TupleTrait = std::true_type;
+ std::tuple<OPT(Modifier), List> t;
};
// V5.2: [5.6.2] `exclusive` clause
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMP.td b/llvm/include/llvm/Frontend/OpenMP/OMP.td
index 1b94657..f11eccc 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ b/llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -179,7 +179,7 @@ def OMPC_DynamicAllocators : Clause<[Spelling<"dynamic_allocators">]> {
let clangClass = "OMPDynamicAllocatorsClause";
}
def OMPC_Enter : Clause<[Spelling<"enter">]> {
- let flangClass = "OmpObjectList";
+ let flangClass = "OmpEnterClause";
}
def OMPC_Exclusive : Clause<[Spelling<"exclusive">]> {
let clangClass = "OMPExclusiveClause";
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 1da4e36..7265a76 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -593,6 +593,14 @@ def int_amdgcn_tanh : DefaultAttrsIntrinsic<
[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
+def int_amdgcn_cvt_sr_pk_f16_f32 : DefaultAttrsIntrinsic<
+ [llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem, IntrSpeculatable]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_pk_f16_f32">;
+
+def int_amdgcn_cvt_sr_pk_bf16_f32 : DefaultAttrsIntrinsic<
+ [llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty], [IntrNoMem, IntrSpeculatable]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_pk_bf16_f32">;
+
def int_amdgcn_cvt_pk_f16_fp8 : DefaultAttrsIntrinsic<
[llvm_v2f16_ty], [llvm_i16_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_cvt_pk_f16_fp8">;
@@ -601,18 +609,57 @@ def int_amdgcn_cvt_pk_f16_bf8 : DefaultAttrsIntrinsic<
[llvm_v2f16_ty], [llvm_i16_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_cvt_pk_f16_bf8">;
-class AMDGPUCvtScaleF32Intrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
- [DstTy], [Src0Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+def int_amdgcn_cvt_pk_fp8_f16
+ : DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_v2f16_ty],
+ [IntrNoMem, IntrSpeculatable]>,
+ ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f16">;
+
+def int_amdgcn_cvt_pk_bf8_f16
+ : DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_v2f16_ty],
+ [IntrNoMem, IntrSpeculatable]>,
+ ClangBuiltin<"__builtin_amdgcn_cvt_pk_bf8_f16">;
+
+// llvm.amdgcn.cvt.sr.fp8.f16 i32 vdst, half src, i32 seed, i32 old, imm byte_sel [0..3]
+// byte_sel selects byte to write in vdst.
+def int_amdgcn_cvt_sr_fp8_f16 : DefaultAttrsIntrinsic<
+ [llvm_i32_ty], [llvm_half_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f16">;
+
+// llvm.amdgcn.cvt.sr.bf8.f16 i32 vdst, half src, i32 seed, i32 old, imm byte_sel [0..3]
+// byte_sel selects byte to write in vdst.
+def int_amdgcn_cvt_sr_bf8_f16 : DefaultAttrsIntrinsic<
+ [llvm_i32_ty], [llvm_half_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
+>, ClangBuiltin<"__builtin_amdgcn_cvt_sr_bf8_f16">;
+
+// llvm.amdgcn.cvt.scale.pk32.f16.bf6 v32f16 vdst, v6i32 src0, i32 scale_sel [0..7]
+class AMDGPUCvtScaleIntrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
+ [DstTy], [Src0Ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]
>, ClangBuiltin<"__builtin_amdgcn_"#name>;
-class AMDGPUCvtScaleF32ToFP6BF6Intrinsic<LLVMType DstTy, LLVMType Src0Ty, LLVMType Src1Ty, string name> : DefaultAttrsIntrinsic<
- [DstTy], [Src0Ty, Src1Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+class AMDGPUCvtScaleF32Intrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
+ [DstTy], [Src0Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_"#name>;
class AMDGPUCvtScaleF32SRIntrinsic<LLVMType DstTy, LLVMType Src0Ty, string name> : DefaultAttrsIntrinsic<
[DstTy], [Src0Ty, llvm_i32_ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>, ClangBuiltin<"__builtin_amdgcn_"#name>;
+def int_amdgcn_cvt_scale_pk8_f16_fp8 : AMDGPUCvtScaleIntrinsic<llvm_v8f16_ty, llvm_v2i32_ty, "cvt_scale_pk8_f16_fp8">;
+def int_amdgcn_cvt_scale_pk8_bf16_fp8 : AMDGPUCvtScaleIntrinsic<llvm_v8bf16_ty, llvm_v2i32_ty, "cvt_scale_pk8_bf16_fp8">;
+def int_amdgcn_cvt_scale_pk8_f16_bf8 : AMDGPUCvtScaleIntrinsic<llvm_v8f16_ty, llvm_v2i32_ty, "cvt_scale_pk8_f16_bf8">;
+def int_amdgcn_cvt_scale_pk8_bf16_bf8 : AMDGPUCvtScaleIntrinsic<llvm_v8bf16_ty, llvm_v2i32_ty, "cvt_scale_pk8_bf16_bf8">;
+def int_amdgcn_cvt_scale_pk8_f16_fp4 : AMDGPUCvtScaleIntrinsic<llvm_v8f16_ty, llvm_i32_ty, "cvt_scale_pk8_f16_fp4">;
+def int_amdgcn_cvt_scale_pk8_bf16_fp4 : AMDGPUCvtScaleIntrinsic<llvm_v8bf16_ty, llvm_i32_ty, "cvt_scale_pk8_bf16_fp4">;
+def int_amdgcn_cvt_scale_pk8_f32_fp8 : AMDGPUCvtScaleIntrinsic<llvm_v8f32_ty, llvm_v2i32_ty, "cvt_scale_pk8_f32_fp8">;
+def int_amdgcn_cvt_scale_pk8_f32_bf8 : AMDGPUCvtScaleIntrinsic<llvm_v8f32_ty, llvm_v2i32_ty, "cvt_scale_pk8_f32_bf8">;
+def int_amdgcn_cvt_scale_pk8_f32_fp4 : AMDGPUCvtScaleIntrinsic<llvm_v8f32_ty, llvm_i32_ty, "cvt_scale_pk8_f32_fp4">;
+
+class AMDGPUCvtScaleF32ToFP6BF6Intrinsic<LLVMType DstTy, LLVMType Src0Ty, LLVMType Src1Ty, string name> : DefaultAttrsIntrinsic<
+ [DstTy], [Src0Ty, Src1Ty, llvm_float_ty], [IntrNoMem, IntrSpeculatable]
+>, ClangBuiltin<"__builtin_amdgcn_"#name>;
+
def int_amdgcn_cvt_scalef32_pk32_fp6_f16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32f16_ty, "cvt_scalef32_pk32_fp6_f16">;
def int_amdgcn_cvt_scalef32_pk32_bf6_f16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32f16_ty, "cvt_scalef32_pk32_bf6_f16">;
def int_amdgcn_cvt_scalef32_pk32_fp6_bf16 : AMDGPUCvtScaleF32Intrinsic<llvm_v6i32_ty, llvm_v32bf16_ty, "cvt_scalef32_pk32_fp6_bf16">;
@@ -3473,6 +3520,12 @@ def int_amdgcn_cvt_pk_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f32">,
[llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>]>;
+// llvm.amdgcn.cvt.pk.fp8.f32.e5m3 int vdst, float srcA, float srcB, int old, imm word_sel
+def int_amdgcn_cvt_pk_fp8_f32_e5m3 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f32_e5m3">,
+ DefaultAttrsIntrinsic<[llvm_i32_ty],
+ [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
// llvm.amdgcn.cvt.sr.bf8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
// byte_sel selects byte to write into vdst.
def int_amdgcn_cvt_sr_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_bf8_f32">,
@@ -3486,6 +3539,12 @@ def int_amdgcn_cvt_sr_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32">,
[llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>]>;
+// llvm.amdgcn.cvt.sr.fp8.f32.e5m3 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
+def int_amdgcn_cvt_sr_fp8_f32_e5m3 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32_e5m3">,
+ DefaultAttrsIntrinsic<[llvm_i32_ty],
+ [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
// llvm.amdgcn.cvt.off.f32.i4 float vdst, int srcA
def int_amdgcn_cvt_off_f32_i4: ClangBuiltin<"__builtin_amdgcn_cvt_off_f32_i4">,
DefaultAttrsIntrinsic<[llvm_float_ty],
diff --git a/llvm/include/llvm/LTO/LTO.h b/llvm/include/llvm/LTO/LTO.h
index acc78bb..323c478 100644
--- a/llvm/include/llvm/LTO/LTO.h
+++ b/llvm/include/llvm/LTO/LTO.h
@@ -546,10 +546,12 @@ private:
// resolutions used by a single input module. Functions return ranges referring
// to the resolutions for the remaining modules in the InputFile.
Expected<ArrayRef<SymbolResolution>>
- addModule(InputFile &Input, unsigned ModI, ArrayRef<SymbolResolution> Res);
+ addModule(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ unsigned ModI, ArrayRef<SymbolResolution> Res);
Expected<std::pair<RegularLTOState::AddedModule, ArrayRef<SymbolResolution>>>
- addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ addRegularLTO(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
ArrayRef<SymbolResolution> Res);
Error linkRegularLTO(RegularLTOState::AddedModule Mod,
bool LivenessFromIndex);
diff --git a/llvm/include/llvm/MC/MCObjectFileInfo.h b/llvm/include/llvm/MC/MCObjectFileInfo.h
index 5ce58ae..d69560c 100644
--- a/llvm/include/llvm/MC/MCObjectFileInfo.h
+++ b/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -69,6 +69,9 @@ protected:
/// Language Specific Data Area information is emitted to.
MCSection *LSDASection = nullptr;
+ /// Section containing call graph metadata.
+ MCSection *CallGraphSection = nullptr;
+
/// If exception handling is supported by the target and the target can
/// support a compact representation of the CIE and FDE, this is the section
/// to emit them into.
@@ -359,6 +362,8 @@ public:
MCSection *getFaultMapSection() const { return FaultMapSection; }
MCSection *getRemarksSection() const { return RemarksSection; }
+ MCSection *getCallGraphSection(const MCSection &TextSec) const;
+
MCSection *getStackSizesSection(const MCSection &TextSec) const;
MCSection *getBBAddrMapSection(const MCSection &TextSec) const;
diff --git a/llvm/include/llvm/MC/MCObjectStreamer.h b/llvm/include/llvm/MC/MCObjectStreamer.h
index 5ac7aba..4b43a8f 100644
--- a/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -52,10 +52,6 @@ class MCObjectStreamer : public MCStreamer {
DenseMap<const MCSymbol *, SmallVector<PendingAssignment, 1>>
pendingAssignments;
- SmallVector<std::unique_ptr<char[]>, 0> FragStorage;
- // Available bytes in the current block for trailing data or new fragments.
- size_t FragSpace = 0;
-
void emitInstToData(const MCInst &Inst, const MCSubtargetInfo &);
void emitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
void emitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
@@ -88,18 +84,11 @@ public:
// Add a fragment with a variable-size tail and start a new empty fragment.
void insert(MCFragment *F);
- char *getCurFragEnd() const {
- return reinterpret_cast<char *>(CurFrag + 1) + CurFrag->getFixedSize();
- }
- MCFragment *allocFragSpace(size_t Headroom);
// Add a new fragment to the current section without a variable-size tail.
void newFragment();
- void ensureHeadroom(size_t Headroom);
void appendContents(ArrayRef<char> Contents);
void appendContents(size_t Num, char Elt);
- // Add a fixup to the current fragment. Call ensureHeadroom beforehand to
- // ensure the fixup and appended content apply to the same fragment.
void addFixup(const MCExpr *Value, MCFixupKind Kind);
void emitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 2e929d8..df8f617b 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -93,7 +93,8 @@ protected:
// Track content and fixups for the fixed-size part as fragments are
// appended to the section. The content remains immutable, except when
// modified by applyFixup.
- uint32_t FixedSize = 0;
+ uint32_t ContentStart = 0;
+ uint32_t ContentEnd = 0;
uint32_t FixupStart = 0;
uint32_t FixupEnd = 0;
@@ -187,6 +188,18 @@ public:
//== Content-related functions manage parent's storage using ContentStart and
// ContentSize.
+ // Get a SmallVector reference. The caller should call doneAppending to update
+ // `ContentEnd`.
+ SmallVectorImpl<char> &getContentsForAppending();
+ void doneAppending();
+ void appendContents(ArrayRef<char> Contents) {
+ getContentsForAppending().append(Contents.begin(), Contents.end());
+ doneAppending();
+ }
+ void appendContents(size_t Num, char Elt) {
+ getContentsForAppending().append(Num, Elt);
+ doneAppending();
+ }
MutableArrayRef<char> getContents();
ArrayRef<char> getContents() const;
@@ -195,10 +208,10 @@ public:
MutableArrayRef<char> getVarContents();
ArrayRef<char> getVarContents() const;
- size_t getFixedSize() const { return FixedSize; }
+ size_t getFixedSize() const { return ContentEnd - ContentStart; }
size_t getVarSize() const { return VarContentEnd - VarContentStart; }
size_t getSize() const {
- return FixedSize + (VarContentEnd - VarContentStart);
+ return ContentEnd - ContentStart + (VarContentEnd - VarContentStart);
}
//== Fixup-related functions manage parent's storage using FixupStart and
@@ -621,11 +634,28 @@ public:
bool isBssSection() const { return IsBss; }
};
+inline SmallVectorImpl<char> &MCFragment::getContentsForAppending() {
+ SmallVectorImpl<char> &S = getParent()->ContentStorage;
+ if (LLVM_UNLIKELY(ContentEnd != S.size())) {
+ // Move the elements to the end. Reserve space to avoid invalidating
+ // S.begin()+I for `append`.
+ auto Size = ContentEnd - ContentStart;
+ auto I = std::exchange(ContentStart, S.size());
+ S.reserve(S.size() + Size);
+ S.append(S.begin() + I, S.begin() + I + Size);
+ }
+ return S;
+}
+inline void MCFragment::doneAppending() {
+ ContentEnd = getParent()->ContentStorage.size();
+}
inline MutableArrayRef<char> MCFragment::getContents() {
- return {reinterpret_cast<char *>(this + 1), FixedSize};
+ return MutableArrayRef(getParent()->ContentStorage)
+ .slice(ContentStart, ContentEnd - ContentStart);
}
inline ArrayRef<char> MCFragment::getContents() const {
- return {reinterpret_cast<const char *>(this + 1), FixedSize};
+ return ArrayRef(getParent()->ContentStorage)
+ .slice(ContentStart, ContentEnd - ContentStart);
}
inline MutableArrayRef<char> MCFragment::getVarContents() {
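
With this change a fragment's fixed-size contents live in the parent section's
shared ContentStorage buffer, with [ContentStart, ContentEnd) naming the
fragment's slice, instead of trailing the fragment allocation. Only a fragment
whose slice ends at the buffer's tail can grow in place; for any other fragment
getContentsForAppending first relocates its bytes to the tail. A minimal sketch
of the appending protocol this implies (F is some MCFragment *; equivalent to
the appendContents helpers above):

    SmallVectorImpl<char> &S = F->getContentsForAppending();
    S.push_back('\x90');  // bytes land directly in the shared buffer
    S.append(4, '\0');
    F->doneAppending();   // record the new ContentEnd for this fragment

Skipping doneAppending would leave ContentEnd stale, so getFixedSize() and any
later getContents() would not see the appended bytes.
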
diff --git a/llvm/include/llvm/ObjectYAML/ELFYAML.h b/llvm/include/llvm/ObjectYAML/ELFYAML.h
index e883f2f..3bf8c29 100644
--- a/llvm/include/llvm/ObjectYAML/ELFYAML.h
+++ b/llvm/include/llvm/ObjectYAML/ELFYAML.h
@@ -117,7 +117,7 @@ struct FileHeader {
llvm::yaml::Hex8 ABIVersion;
ELF_ET Type;
std::optional<ELF_EM> Machine;
- ELF_EF Flags;
+ std::optional<ELF_EF> Flags;
llvm::yaml::Hex64 Entry;
std::optional<StringRef> SectionHeaderStringTable;
diff --git a/llvm/include/llvm/ProfileData/MemProfData.inc b/llvm/include/llvm/ProfileData/MemProfData.inc
index 3f785bd..26baddd 100644
--- a/llvm/include/llvm/ProfileData/MemProfData.inc
+++ b/llvm/include/llvm/ProfileData/MemProfData.inc
@@ -33,11 +33,10 @@
(uint64_t)'o' << 24 | (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129)
// The version number of the raw binary format.
-#define MEMPROF_RAW_VERSION 4ULL
+#define MEMPROF_RAW_VERSION 5ULL
// Currently supported versions.
-#define MEMPROF_RAW_SUPPORTED_VERSIONS \
- { 3ULL, 4ULL }
+#define MEMPROF_RAW_SUPPORTED_VERSIONS {3ULL, 4ULL, 5ULL}
#define MEMPROF_V3_MIB_SIZE 132ULL;
@@ -229,6 +228,41 @@ void Merge(const MemInfoBlock &newMIB) {
} __attribute__((__packed__));
#endif
+constexpr int MantissaBits = 12;
+constexpr int ExponentBits = 4;
+constexpr uint16_t MaxMantissa = (1U << MantissaBits) - 1;
+constexpr uint16_t MaxExponent = (1U << ExponentBits) - 1;
+constexpr uint64_t MaxRepresentableValue = static_cast<uint64_t>(MaxMantissa)
+ << MaxExponent;
+
+// Encodes a 64-bit unsigned integer into a 16-bit scaled integer format.
+inline uint16_t encodeHistogramCount(uint64_t Count) {
+ if (Count == 0)
+ return 0;
+
+ if (Count > MaxRepresentableValue)
+ Count = MaxRepresentableValue;
+
+ if (Count <= MaxMantissa)
+ return Count;
+
+ uint64_t M = Count;
+ uint16_t E = 0;
+ while (M > MaxMantissa) {
+ M = (M + 1) >> 1;
+ E++;
+ }
+ return (E << MantissaBits) | static_cast<uint16_t>(M);
+}
+
+// Decodes a 16-bit scaled integer and returns the
+// decoded 64-bit unsigned integer.
+inline uint64_t decodeHistogramCount(uint16_t EncodedValue) {
+ const uint16_t E = EncodedValue >> MantissaBits;
+ const uint16_t M = EncodedValue & MaxMantissa;
+ return static_cast<uint64_t>(M) << E;
+}
+
} // namespace memprof
} // namespace llvm
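
The scaled 16-bit encoding above keeps a 12-bit mantissa and a 4-bit exponent,
so counts up to MaxMantissa round-trip exactly and larger counts are halved
(with rounding) until they fit. A worked example using only the two helpers
defined above (values computed by hand; nothing beyond this inc file is
assumed):

    // encodeHistogramCount(10000):
    //   10000 > MaxMantissa (4095), so halve with rounding:
    //   M=10000,E=0 -> M=5000,E=1 -> M=2500,E=2
    //   result = (2 << 12) | 2500 = 10692
    uint16_t Enc = encodeHistogramCount(10000); // 10692
    // decodeHistogramCount(10692):
    //   E = 10692 >> 12 = 2, M = 10692 & 4095 = 2500
    uint64_t Dec = decodeHistogramCount(Enc);   // 2500 << 2 = 10000

Counts above MaxRepresentableValue (MaxMantissa << MaxExponent) saturate to
it, trading precision on large counts for a compact 16-bit entry.
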
diff --git a/llvm/include/llvm/Support/VirtualFileSystem.h b/llvm/include/llvm/Support/VirtualFileSystem.h
index 734b795..d976773 100644
--- a/llvm/include/llvm/Support/VirtualFileSystem.h
+++ b/llvm/include/llvm/Support/VirtualFileSystem.h
@@ -1069,7 +1069,7 @@ public:
/// Redirect each of the remapped files from first to second.
static std::unique_ptr<RedirectingFileSystem>
create(ArrayRef<std::pair<std::string, std::string>> RemappedFiles,
- bool UseExternalNames, FileSystem &ExternalFS);
+ bool UseExternalNames, IntrusiveRefCntPtr<FileSystem> ExternalFS);
ErrorOr<Status> status(const Twine &Path) override;
bool exists(const Twine &Path) override;
diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index 719c0ee..e57032a 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -6494,7 +6494,7 @@ struct AAAllocationInfo : public StateWrapper<BooleanState, AbstractAttribute> {
}
constexpr static const std::optional<TypeSize> HasNoAllocationSize =
- std::optional<TypeSize>(TypeSize(-1, true));
+ std::make_optional<TypeSize>(-1, true);
LLVM_ABI static const char ID;
};
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index cba3736..43ff084 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -400,19 +400,11 @@ public:
/// Returns true if the loop has exactly one uncountable early exit, i.e. an
/// uncountable exit that isn't the latch block.
- bool hasUncountableEarlyExit() const {
- return getUncountableEdge().has_value();
- }
+ bool hasUncountableEarlyExit() const { return UncountableExitingBB; }
/// Returns the uncountable early exiting block, if there is exactly one.
BasicBlock *getUncountableEarlyExitingBlock() const {
- return hasUncountableEarlyExit() ? getUncountableEdge()->first : nullptr;
- }
-
- /// Returns the destination of the uncountable early exiting block, if there
- /// is exactly one.
- BasicBlock *getUncountableEarlyExitBlock() const {
- return hasUncountableEarlyExit() ? getUncountableEdge()->second : nullptr;
+ return UncountableExitingBB;
}
/// Return true if there is store-load forwarding dependencies.
@@ -473,13 +465,6 @@ public:
return CountableExitingBlocks;
}
- /// Returns the loop edge to an uncountable exit, or std::nullopt if there
- /// isn't a single such edge.
- std::optional<std::pair<BasicBlock *, BasicBlock *>>
- getUncountableEdge() const {
- return UncountableEdge;
- }
-
private:
/// Return true if the pre-header, exiting and latch blocks of \p Lp and all
/// its nested loops are considered legal for vectorization. These legal
@@ -659,9 +644,9 @@ private:
/// the exact backedge taken count is not computable.
SmallVector<BasicBlock *, 4> CountableExitingBlocks;
- /// Keep track of the loop edge to an uncountable exit, comprising a pair
- /// of (Exiting, Exit) blocks, if there is exactly one early exit.
- std::optional<std::pair<BasicBlock *, BasicBlock *>> UncountableEdge;
+ /// Keep track of an uncountable exiting block, if there is exactly one early
+ /// exit.
+ BasicBlock *UncountableExitingBB = nullptr;
};
} // namespace llvm
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index cfde787..16dd6f8 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -175,6 +175,7 @@ add_llvm_component_library(LLVMAnalysis
LINK_COMPONENTS
BinaryFormat
Core
+ FrontendHLSL
Object
ProfileData
Support
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 2d52f34..dd98b62 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -2679,11 +2679,12 @@ static Constant *ConstantFoldScalarCall1(StringRef Name,
case Intrinsic::nvvm_round_ftz_f:
case Intrinsic::nvvm_round_f:
case Intrinsic::nvvm_round_d: {
- // Use APFloat implementation instead of native libm call, as some
- // implementations (e.g. on PPC) do not preserve the sign of negative 0.
+ // nvvm_round is lowered to PTX cvt.rni, which will round to nearest
+ // integer, choosing even integer if source is equidistant between two
+ // integers, so the semantics are closer to "rint" than to "round".
bool IsFTZ = nvvm::UnaryMathIntrinsicShouldFTZ(IntrinsicID);
auto V = IsFTZ ? FTZPreserveSign(APF) : APF;
- V.roundToIntegral(APFloat::rmNearestTiesToAway);
+ V.roundToIntegral(APFloat::rmNearestTiesToEven);
return ConstantFP::get(Ty->getContext(), V);
}
diff --git a/llvm/lib/Analysis/DXILResource.cpp b/llvm/lib/Analysis/DXILResource.cpp
index 1959ab6..629fa7cd 100644
--- a/llvm/lib/Analysis/DXILResource.cpp
+++ b/llvm/lib/Analysis/DXILResource.cpp
@@ -995,18 +995,7 @@ SmallVector<dxil::ResourceInfo *> DXILResourceMap::findByUse(const Value *Key) {
//===----------------------------------------------------------------------===//
void DXILResourceBindingInfo::populate(Module &M, DXILResourceTypeMap &DRTM) {
- struct Binding {
- ResourceClass RC;
- uint32_t Space;
- uint32_t LowerBound;
- uint32_t UpperBound;
- Value *Name;
- Binding(ResourceClass RC, uint32_t Space, uint32_t LowerBound,
- uint32_t UpperBound, Value *Name)
- : RC(RC), Space(Space), LowerBound(LowerBound), UpperBound(UpperBound),
- Name(Name) {}
- };
- SmallVector<Binding> Bindings;
+ hlsl::BindingInfoBuilder Builder;
// collect all of the llvm.dx.resource.handlefrombinding calls;
// make a note if there is llvm.dx.resource.handlefromimplicitbinding
@@ -1036,133 +1025,20 @@ void DXILResourceBindingInfo::populate(Module &M, DXILResourceTypeMap &DRTM) {
assert((Size < 0 || (unsigned)LowerBound + Size - 1 <= UINT32_MAX) &&
"upper bound register overflow");
uint32_t UpperBound = Size < 0 ? UINT32_MAX : LowerBound + Size - 1;
- Bindings.emplace_back(RTI.getResourceClass(), Space, LowerBound,
- UpperBound, Name);
+ Builder.trackBinding(RTI.getResourceClass(), Space, LowerBound,
+ UpperBound, Name);
}
break;
}
case Intrinsic::dx_resource_handlefromimplicitbinding: {
- ImplicitBinding = true;
+ HasImplicitBinding = true;
break;
}
}
}
- // sort all the collected bindings
- llvm::stable_sort(Bindings, [](auto &LHS, auto &RHS) {
- return std::tie(LHS.RC, LHS.Space, LHS.LowerBound) <
- std::tie(RHS.RC, RHS.Space, RHS.LowerBound);
- });
-
- // remove duplicates
- Binding *NewEnd = llvm::unique(Bindings, [](auto &LHS, auto &RHS) {
- return std::tie(LHS.RC, LHS.Space, LHS.LowerBound, LHS.UpperBound,
- LHS.Name) == std::tie(RHS.RC, RHS.Space, RHS.LowerBound,
- RHS.UpperBound, RHS.Name);
- });
- if (NewEnd != Bindings.end())
- Bindings.erase(NewEnd);
-
- // Go over the sorted bindings and build up lists of free register ranges
- // for each binding type and used spaces. Bindings are sorted by resource
- // class, space, and lower bound register slot.
- BindingSpaces *BS = &SRVSpaces;
- for (const Binding &B : Bindings) {
- if (BS->RC != B.RC)
- // move to the next resource class spaces
- BS = &getBindingSpaces(B.RC);
-
- RegisterSpace *S = BS->Spaces.empty() ? &BS->Spaces.emplace_back(B.Space)
- : &BS->Spaces.back();
- assert(S->Space <= B.Space && "bindings not sorted correctly?");
- if (B.Space != S->Space)
- // add new space
- S = &BS->Spaces.emplace_back(B.Space);
-
- // The space is full - there are no free slots left, or the rest of the
- // slots are taken by an unbounded array. Set flag to report overlapping
- // binding later.
- if (S->FreeRanges.empty() || S->FreeRanges.back().UpperBound < UINT32_MAX) {
- OverlappingBinding = true;
- continue;
- }
-
- // adjust the last free range lower bound, split it in two, or remove it
- BindingRange &LastFreeRange = S->FreeRanges.back();
- if (LastFreeRange.LowerBound == B.LowerBound) {
- if (B.UpperBound < UINT32_MAX)
- LastFreeRange.LowerBound = B.UpperBound + 1;
- else
- S->FreeRanges.pop_back();
- } else if (LastFreeRange.LowerBound < B.LowerBound) {
- LastFreeRange.UpperBound = B.LowerBound - 1;
- if (B.UpperBound < UINT32_MAX)
- S->FreeRanges.emplace_back(B.UpperBound + 1, UINT32_MAX);
- } else {
- OverlappingBinding = true;
- if (B.UpperBound < UINT32_MAX)
- LastFreeRange.LowerBound =
- std::max(LastFreeRange.LowerBound, B.UpperBound + 1);
- else
- S->FreeRanges.pop_back();
- }
- }
-}
-
-// returns std::nulopt if binding could not be found in given space
-std::optional<uint32_t>
-DXILResourceBindingInfo::findAvailableBinding(dxil::ResourceClass RC,
- uint32_t Space, int32_t Size) {
- BindingSpaces &BS = getBindingSpaces(RC);
- RegisterSpace &RS = BS.getOrInsertSpace(Space);
- return RS.findAvailableBinding(Size);
-}
-
-DXILResourceBindingInfo::RegisterSpace &
-DXILResourceBindingInfo::BindingSpaces::getOrInsertSpace(uint32_t Space) {
- for (auto *I = Spaces.begin(); I != Spaces.end(); ++I) {
- if (I->Space == Space)
- return *I;
- if (I->Space < Space)
- continue;
- return *Spaces.insert(I, Space);
- }
- return Spaces.emplace_back(Space);
-}
-
-std::optional<uint32_t>
-DXILResourceBindingInfo::RegisterSpace::findAvailableBinding(int32_t Size) {
- assert((Size == -1 || Size > 0) && "invalid size");
-
- if (FreeRanges.empty())
- return std::nullopt;
-
- // unbounded array
- if (Size == -1) {
- BindingRange &Last = FreeRanges.back();
- if (Last.UpperBound != UINT32_MAX)
- // this space is already occupied by an unbounded array
- return std::nullopt;
- uint32_t RegSlot = Last.LowerBound;
- FreeRanges.pop_back();
- return RegSlot;
- }
-
- // single resource or fixed-size array
- for (BindingRange &R : FreeRanges) {
- // compare the size as uint64_t to prevent overflow for range (0,
- // UINT32_MAX)
- if ((uint64_t)R.UpperBound - R.LowerBound + 1 < (uint64_t)Size)
- continue;
- uint32_t RegSlot = R.LowerBound;
- // This might create a range where (LowerBound == UpperBound + 1). When
- // that happens, the next time this function is called the range will
- // skipped over by the check above (at this point Size is always > 0).
- R.LowerBound += Size;
- return RegSlot;
- }
-
- return std::nullopt;
+ Bindings = Builder.calculateBindingInfo(
+ [this](auto, auto) { this->HasOverlappingBinding = true; });
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 0990a0d..477e477 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2682,6 +2682,20 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
return getAddExpr(NewOps, PreservedFlags);
}
}
+
+ // Try to push the constant operand into a ZExt: A + zext (-A + B) -> zext
+ // (B), if trunc (A) + -A + B does not unsigned-wrap.
+ const SCEVAddExpr *InnerAdd;
+ if (match(B, m_scev_ZExt(m_scev_Add(InnerAdd)))) {
+ const SCEV *NarrowA = getTruncateExpr(A, InnerAdd->getType());
+ if (NarrowA == getNegativeSCEV(InnerAdd->getOperand(0)) &&
+ getZeroExtendExpr(NarrowA, B->getType()) == A &&
+ hasFlags(StrengthenNoWrapFlags(this, scAddExpr, {NarrowA, InnerAdd},
+ SCEV::FlagAnyWrap),
+ SCEV::FlagNUW)) {
+ return getZeroExtendExpr(getAddExpr(NarrowA, InnerAdd), B->getType());
+ }
+ }
}
// Canonicalize (-1 * urem X, Y) + X --> (Y * X/Y)
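
A concrete instance of the new zext fold above (assuming, as in its comment, A
is the constant operand and B the zext operand of the outer add): take A = 16
as an i64 SCEV and B = (zext i32 (-16 + %x) to i64). Then trunc(A) = 16 is the
negation of the inner add's constant operand, zext(trunc(A)) == A, and if
16 + (-16 + %x) is provably NUW the whole sum folds to (zext i32 %x to i64).
The StrengthenNoWrapFlags check is what licenses narrowing the add; without
NUW the narrow sum could wrap and the rewrite would be unsound.
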
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 55ba52a..c7eb2ec 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1486,6 +1486,10 @@ void TargetTransformInfo::collectKernelLaunchBounds(
return TTIImpl->collectKernelLaunchBounds(F, LB);
}
+bool TargetTransformInfo::allowVectorElementIndexingUsingGEP() const {
+ return TTIImpl->allowVectorElementIndexingUsingGEP();
+}
+
TargetTransformInfoImplBase::~TargetTransformInfoImplBase() = default;
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index e9cf2ee..b3b4c37 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -81,7 +81,6 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
case Intrinsic::exp:
case Intrinsic::exp10:
case Intrinsic::exp2:
- case Intrinsic::ldexp:
case Intrinsic::log:
case Intrinsic::log10:
case Intrinsic::log2:
@@ -109,8 +108,6 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
case Intrinsic::canonicalize:
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat:
- case Intrinsic::lround:
- case Intrinsic::llround:
case Intrinsic::lrint:
case Intrinsic::llrint:
case Intrinsic::ucmp:
@@ -192,8 +189,6 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
switch (ID) {
case Intrinsic::fptosi_sat:
case Intrinsic::fptoui_sat:
- case Intrinsic::lround:
- case Intrinsic::llround:
case Intrinsic::lrint:
case Intrinsic::llrint:
case Intrinsic::vp_lrint:
@@ -208,7 +203,6 @@ bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
case Intrinsic::vp_is_fpclass:
return OpdIdx == 0;
case Intrinsic::powi:
- case Intrinsic::ldexp:
return OpdIdx == -1 || OpdIdx == 1;
default:
return OpdIdx == -1;
@@ -1123,7 +1117,7 @@ Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
const InterleaveGroup<Instruction> &Group) {
// All 1's means mask is not needed.
- if (Group.getNumMembers() == Group.getFactor())
+ if (Group.isFull())
return nullptr;
// TODO: support reversed access.
@@ -1669,7 +1663,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// Case 1: A full group. We can skip the checks; for full groups, if the wide
// load would wrap around the address space, we would do a memory access at
// nullptr even without the transformation.
- if (Group->getNumMembers() == Group->getFactor())
+ if (Group->isFull())
continue;
// Case 2: If first and last members of the group don't wrap this implies
@@ -1704,7 +1698,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
// Case 1: A full group. We can skip the checks; for full groups, if the wide
// store would wrap around the address space, we would do a memory access at
// nullptr even without the transformation.
- if (Group->getNumMembers() == Group->getFactor())
+ if (Group->isFull())
continue;
// Interleave-store-group with gaps is implemented using masked wide store.
diff --git a/llvm/lib/CGData/StableFunctionMapRecord.cpp b/llvm/lib/CGData/StableFunctionMapRecord.cpp
index 4e4fcef..423e068 100644
--- a/llvm/lib/CGData/StableFunctionMapRecord.cpp
+++ b/llvm/lib/CGData/StableFunctionMapRecord.cpp
@@ -160,14 +160,18 @@ void StableFunctionMapRecord::deserialize(const unsigned char *&Ptr,
for (unsigned I = 0; I < NumFuncs; ++I) {
auto Hash =
endian::readNext<stable_hash, endianness::little, unaligned>(Ptr);
- auto FunctionNameId =
+ [[maybe_unused]] auto FunctionNameId =
endian::readNext<uint32_t, endianness::little, unaligned>(Ptr);
- assert(FunctionMap->getNameForId(FunctionNameId) &&
- "FunctionNameId out of range");
- auto ModuleNameId =
+ [[maybe_unused]] auto ModuleNameId =
endian::readNext<uint32_t, endianness::little, unaligned>(Ptr);
- assert(FunctionMap->getNameForId(ModuleNameId) &&
- "ModuleNameId out of range");
+ // Only validate IDs if we've read the names
+ if (ReadStableFunctionMapNames) {
+ assert(FunctionMap->getNameForId(FunctionNameId) &&
+ "FunctionNameId out of range");
+ assert(FunctionMap->getNameForId(ModuleNameId) &&
+ "ModuleNameId out of range");
+ }
+
auto InstCount =
endian::readNext<uint32_t, endianness::little, unaligned>(Ptr);
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 6166271..1641c3e 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1654,6 +1654,88 @@ void AsmPrinter::emitStackUsage(const MachineFunction &MF) {
*StackUsageStream << "static\n";
}
+/// Extracts a generalized numeric type identifier of a Function's type from
+/// type metadata. Returns null if metadata cannot be found.
+static ConstantInt *extractNumericCGTypeId(const Function &F) {
+ SmallVector<MDNode *, 2> Types;
+ F.getMetadata(LLVMContext::MD_type, Types);
+ for (const auto &Type : Types) {
+ if (Type->hasGeneralizedMDString()) {
+ MDString *MDGeneralizedTypeId = cast<MDString>(Type->getOperand(1));
+ uint64_t TypeIdVal = llvm::MD5Hash(MDGeneralizedTypeId->getString());
+ IntegerType *Int64Ty = Type::getInt64Ty(F.getContext());
+ return ConstantInt::get(Int64Ty, TypeIdVal);
+ }
+ }
+ return nullptr;
+}
+
+/// Emits .callgraph section.
+void AsmPrinter::emitCallGraphSection(const MachineFunction &MF,
+ FunctionInfo &FuncInfo) {
+ if (!MF.getTarget().Options.EmitCallGraphSection)
+ return;
+
+ // Switch to the call graph section for the function
+ MCSection *FuncCGSection =
+ getObjFileLowering().getCallGraphSection(*getCurrentSection());
+ assert(FuncCGSection && "null callgraph section");
+ OutStreamer->pushSection();
+ OutStreamer->switchSection(FuncCGSection);
+
+ // Emit format version number.
+ OutStreamer->emitInt64(CallGraphSectionFormatVersion::V_0);
+
+ // Emit the function's self information, which is composed of:
+ // 1) FunctionEntryPc
+ // 2) FunctionKind: whether the function is an indirect target, and if so,
+ // whether its type id is known.
+ // 3) FunctionTypeId: emitted only when the function is an indirect target
+ // and its type id is known.
+
+ // Emit function entry pc.
+ const MCSymbol *FunctionSymbol = getFunctionBegin();
+ OutStreamer->emitSymbolValue(FunctionSymbol, TM.getProgramPointerSize());
+
+ // If this function has external linkage or has its address taken and
+ // it is not a callback, then anything could call it.
+ const Function &F = MF.getFunction();
+ bool IsIndirectTarget =
+ !F.hasLocalLinkage() || F.hasAddressTaken(nullptr,
+ /*IgnoreCallbackUses=*/true,
+ /*IgnoreAssumeLikeCalls=*/true,
+ /*IgnoreLLVMUsed=*/false);
+
+ // FIXME: FunctionKind takes only a few values but is emitted as a 64-bit
+ // value; it could be packed into 2 bits instead.
+ // Emit function kind, and type id if available.
+ if (!IsIndirectTarget) {
+ OutStreamer->emitInt64(
+ static_cast<uint64_t>(FunctionInfo::FunctionKind::NOT_INDIRECT_TARGET));
+ } else {
+ if (const auto *TypeId = extractNumericCGTypeId(F)) {
+ OutStreamer->emitInt64(static_cast<uint64_t>(
+ FunctionInfo::FunctionKind::INDIRECT_TARGET_KNOWN_TID));
+ OutStreamer->emitInt64(TypeId->getZExtValue());
+ } else {
+ OutStreamer->emitInt64(static_cast<uint64_t>(
+ FunctionInfo::FunctionKind::INDIRECT_TARGET_UNKNOWN_TID));
+ }
+ }
+
+ // Emit callsite labels, where each element is a pair of type id and
+ // indirect callsite pc.
+ const auto &CallSiteLabels = FuncInfo.CallSiteLabels;
+ OutStreamer->emitInt64(CallSiteLabels.size());
+ for (const auto &[TypeId, Label] : CallSiteLabels) {
+ OutStreamer->emitInt64(TypeId);
+ OutStreamer->emitSymbolValue(Label, TM.getProgramPointerSize());
+ }
+ FuncInfo.CallSiteLabels.clear();
+
+ OutStreamer->popSection();
+}
+
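
Read off the streamer calls above, each per-function record in the .callgraph
section is laid out as follows (this is a description of the emission code,
not of a separately specified format):

    version        : int64  (CallGraphSectionFormatVersion::V_0)
    entry pc       : pointer-sized value of the function-begin symbol
    kind           : int64  (NOT_INDIRECT_TARGET, or
                             INDIRECT_TARGET_{KNOWN,UNKNOWN}_TID)
    [type id]      : int64, present only for INDIRECT_TARGET_KNOWN_TID
    callsite count : int64
    count entries  : { type id : int64, callsite pc : pointer-sized }
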
void AsmPrinter::emitPCSectionsLabel(const MachineFunction &MF,
const MDNode &MD) {
MCSymbol *S = MF.getContext().createTempSymbol("pcsection");
@@ -1784,6 +1866,23 @@ static StringRef getMIMnemonic(const MachineInstr &MI, MCStreamer &Streamer) {
return Name;
}
+void AsmPrinter::emitIndirectCalleeLabels(
+ FunctionInfo &FuncInfo,
+ const MachineFunction::CallSiteInfoMap &CallSitesInfoMap,
+ const MachineInstr &MI) {
+ // Only indirect calls have type identifiers set.
+ const auto &CallSiteInfo = CallSitesInfoMap.find(&MI);
+ if (CallSiteInfo == CallSitesInfoMap.end())
+ return;
+
+ for (ConstantInt *CalleeTypeId : CallSiteInfo->second.CalleeTypeIds) {
+ MCSymbol *S = MF->getContext().createTempSymbol();
+ OutStreamer->emitLabel(S);
+ uint64_t CalleeTypeIdVal = CalleeTypeId->getZExtValue();
+ FuncInfo.CallSiteLabels.emplace_back(CalleeTypeIdVal, S);
+ }
+}
+
/// EmitFunctionBody - This method emits the body and trailer for a
/// function.
void AsmPrinter::emitFunctionBody() {
@@ -1830,6 +1929,8 @@ void AsmPrinter::emitFunctionBody() {
MBBSectionRanges[MF->front().getSectionID()] =
MBBSectionRange{CurrentFnBegin, nullptr};
+ FunctionInfo FuncInfo;
+ const auto &CallSitesInfoMap = MF->getCallSitesInfo();
for (auto &MBB : *MF) {
// Print a label for the basic block.
emitBasicBlockStart(MBB);
@@ -1963,6 +2064,9 @@ void AsmPrinter::emitFunctionBody() {
break;
}
+ if (TM.Options.EmitCallGraphSection && MI.isCall())
+ emitIndirectCalleeLabels(FuncInfo, CallSitesInfoMap, MI);
+
// If there is a post-instruction symbol, emit a label for it here.
if (MCSymbol *S = MI.getPostInstrSymbol())
OutStreamer->emitLabel(S);
@@ -2142,6 +2246,9 @@ void AsmPrinter::emitFunctionBody() {
// Emit section containing stack size metadata.
emitStackSizeSection(*MF);
+ // Emit section containing call graph metadata.
+ emitCallGraphSection(*MF, FuncInfo);
+
// Emit .su file containing function stack size information.
emitStackUsage(*MF);
@@ -2841,6 +2948,7 @@ void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
F.hasFnAttribute("xray-instruction-threshold") ||
needFuncLabels(MF, *this) || NeedsLocalForSize ||
MF.getTarget().Options.EmitStackSizeSection ||
+ MF.getTarget().Options.EmitCallGraphSection ||
MF.getTarget().Options.BBAddrMap) {
CurrentFnBegin = createTempSymbol("func_begin");
if (NeedsLocalForSize)
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 416c56d..f16283b 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2769,6 +2769,29 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
return optimizeGatherScatterInst(II, II->getArgOperand(0));
case Intrinsic::masked_scatter:
return optimizeGatherScatterInst(II, II->getArgOperand(1));
+ case Intrinsic::masked_load:
+ // Treat v1X masked load as load X type.
+ if (auto *VT = dyn_cast<FixedVectorType>(II->getType())) {
+ if (VT->getNumElements() == 1) {
+ Value *PtrVal = II->getArgOperand(0);
+ unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+ if (optimizeMemoryInst(II, PtrVal, VT->getElementType(), AS))
+ return true;
+ }
+ }
+ return false;
+ case Intrinsic::masked_store:
+ // Treat v1X masked store as store X type.
+ if (auto *VT =
+ dyn_cast<FixedVectorType>(II->getArgOperand(0)->getType())) {
+ if (VT->getNumElements() == 1) {
+ Value *PtrVal = II->getArgOperand(1);
+ unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+ if (optimizeMemoryInst(II, PtrVal, VT->getElementType(), AS))
+ return true;
+ }
+ }
+ return false;
}
SmallVector<Value *, 2> PtrOps;
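
The two new masked_load/masked_store cases above let single-element masked
vectors reuse the scalar addressing-mode machinery. For example (illustrative
IR; the value names are made up), in

    %v = call <1 x i64> @llvm.masked.load.v1i64.p0(ptr %p, i32 8,
                                                   <1 x i1> %m, <1 x i64> %pt)

the call is handed to optimizeMemoryInst with element type i64, so an address
computation feeding %p can be sunk and folded exactly as if this were a plain
i64 load.
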
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index 60d42e0..ec40f6a 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -698,6 +698,26 @@ bool MachineFunction::needsFrameMoves() const {
!F.getParent()->debug_compile_units().empty();
}
+MachineFunction::CallSiteInfo::CallSiteInfo(const CallBase &CB) {
+ // Numeric callee_type ids are only for indirect calls.
+ if (!CB.isIndirectCall())
+ return;
+
+ MDNode *CalleeTypeList = CB.getMetadata(LLVMContext::MD_callee_type);
+ if (!CalleeTypeList)
+ return;
+
+ for (const MDOperand &Op : CalleeTypeList->operands()) {
+ MDNode *TypeMD = cast<MDNode>(Op);
+ MDString *TypeIdStr = cast<MDString>(TypeMD->getOperand(1));
+ // Compute numeric type id from generalized type id string
+ uint64_t TypeIdVal = MD5Hash(TypeIdStr->getString());
+ IntegerType *Int64Ty = Type::getInt64Ty(CB.getContext());
+ CalleeTypeIds.push_back(
+ ConstantInt::get(Int64Ty, TypeIdVal, /*IsSigned=*/false));
+ }
+}
+
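
The metadata this constructor consumes comes from the !callee_type annotation
on indirect call instructions. An illustrative IR sketch (the mangled name is
hypothetical; the shape matches the operand accesses above, where operand 1 of
each entry is the generalized type-id string that gets MD5-hashed):

    %r = call i32 %fptr(i32 %a), !callee_type !0
    !0 = !{!1}
    !1 = !{i64 0, !"_ZTSFiiE.generalized"}
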
namespace llvm {
template<>
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 9d5c39c..c6fa8f4 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -3676,8 +3676,8 @@ void GenericScheduler::initialize(ScheduleDAGMI *dag) {
TopCand.SU = nullptr;
BotCand.SU = nullptr;
- TopCluster = nullptr;
- BotCluster = nullptr;
+ TopClusterID = InvalidClusterId;
+ BotClusterID = InvalidClusterId;
}
/// Initialize the per-region scheduling policy.
@@ -3988,10 +3988,14 @@ bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
// This is a best effort to set things up for a post-RA pass. Optimizations
// like generating loads of multiple registers should ideally be done within
// the scheduler pass by combining the loads during DAG postprocessing.
- const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
- const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
- if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
- CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+ unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
+ bool CandIsClusterSucc =
+ isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+ bool TryCandIsClusterSucc =
+ isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+
+ if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
Cluster))
return TryCand.Reason != NoCand;
@@ -4251,24 +4255,30 @@ void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
- TopCluster = DAG->getCluster(SU->ParentClusterIdx);
- LLVM_DEBUG(if (TopCluster) {
- dbgs() << " Top Cluster: ";
- for (auto *N : *TopCluster)
- dbgs() << N->NodeNum << '\t';
- dbgs() << '\n';
+ TopClusterID = SU->ParentClusterIdx;
+ LLVM_DEBUG({
+ if (TopClusterID != InvalidClusterId) {
+ ClusterInfo *TopCluster = DAG->getCluster(TopClusterID);
+ dbgs() << " Top Cluster: ";
+ for (auto *N : *TopCluster)
+ dbgs() << N->NodeNum << '\t';
+ dbgs() << '\n';
+ }
});
Top.bumpNode(SU);
if (SU->hasPhysRegUses)
reschedulePhysReg(SU, true);
} else {
SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
- BotCluster = DAG->getCluster(SU->ParentClusterIdx);
- LLVM_DEBUG(if (BotCluster) {
- dbgs() << " Bot Cluster: ";
- for (auto *N : *BotCluster)
- dbgs() << N->NodeNum << '\t';
- dbgs() << '\n';
+ BotClusterID = SU->ParentClusterIdx;
+ LLVM_DEBUG({
+ if (BotClusterID != InvalidClusterId) {
+ ClusterInfo *BotCluster = DAG->getCluster(BotClusterID);
+ dbgs() << " Bot Cluster: ";
+ for (auto *N : *BotCluster)
+ dbgs() << N->NodeNum << '\t';
+ dbgs() << '\n';
+ }
});
Bot.bumpNode(SU);
if (SU->hasPhysRegDefs)
@@ -4306,8 +4316,8 @@ void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
if (!Bot.HazardRec) {
Bot.HazardRec = DAG->TII->CreateTargetMIHazardRecognizer(Itin, DAG);
}
- TopCluster = nullptr;
- BotCluster = nullptr;
+ TopClusterID = InvalidClusterId;
+ BotClusterID = InvalidClusterId;
}
void PostGenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
@@ -4373,10 +4383,14 @@ bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
return TryCand.Reason != NoCand;
// Keep clustered nodes together.
- const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
- const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
- if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
- CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+ unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
+ bool CandIsClusterSucc =
+ isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+ bool TryCandIsClusterSucc =
+ isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+
+ if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
Cluster))
return TryCand.Reason != NoCand;
// Avoid critical resource consumption and balance the schedule.
@@ -4575,11 +4589,11 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
- TopCluster = DAG->getCluster(SU->ParentClusterIdx);
+ TopClusterID = SU->ParentClusterIdx;
Top.bumpNode(SU);
} else {
SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
- BotCluster = DAG->getCluster(SU->ParentClusterIdx);
+ BotClusterID = SU->ParentClusterIdx;
Bot.bumpNode(SU);
}
}
diff --git a/llvm/lib/CodeGen/RegAllocBase.cpp b/llvm/lib/CodeGen/RegAllocBase.cpp
index 69b9291..2400a1f 100644
--- a/llvm/lib/CodeGen/RegAllocBase.cpp
+++ b/llvm/lib/CodeGen/RegAllocBase.cpp
@@ -178,10 +178,8 @@ void RegAllocBase::cleanupFailedVReg(Register FailedReg, MCRegister PhysReg,
for (MCRegAliasIterator Aliases(PhysReg, TRI, true); Aliases.isValid();
++Aliases) {
for (MachineOperand &MO : MRI->reg_operands(*Aliases)) {
- if (MO.readsReg()) {
+ if (MO.readsReg())
MO.setIsUndef(true);
- LIS->removeAllRegUnitsForPhysReg(MO.getReg());
- }
}
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a43020e..5989c1d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -331,6 +331,11 @@ namespace {
return CombineTo(N, To, 2, AddTo);
}
+ SDValue CombineTo(SDNode *N, SmallVectorImpl<SDValue> *To,
+ bool AddTo = true) {
+ return CombineTo(N, To->data(), To->size(), AddTo);
+ }
+
void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO);
private:
@@ -541,6 +546,7 @@ namespace {
SDValue visitEXTRACT_VECTOR_ELT(SDNode *N);
SDValue visitBUILD_VECTOR(SDNode *N);
SDValue visitCONCAT_VECTORS(SDNode *N);
+ SDValue visitVECTOR_INTERLEAVE(SDNode *N);
SDValue visitEXTRACT_SUBVECTOR(SDNode *N);
SDValue visitVECTOR_SHUFFLE(SDNode *N);
SDValue visitSCALAR_TO_VECTOR(SDNode *N);
@@ -2021,6 +2027,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N);
case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N);
case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N);
+ case ISD::VECTOR_INTERLEAVE: return visitVECTOR_INTERLEAVE(N);
case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N);
case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N);
case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N);
@@ -4100,18 +4107,17 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
// (sub x, ([v]select (uge x, y), y, 0)) -> (umin x, (sub x, y))
if (N1.hasOneUse() && hasUMin(VT)) {
SDValue Y;
- if (sd_match(N1, m_Select(m_SetCC(m_Specific(N0), m_Value(Y),
- m_SpecificCondCode(ISD::SETULT)),
- m_Zero(), m_Deferred(Y))) ||
- sd_match(N1, m_Select(m_SetCC(m_Specific(N0), m_Value(Y),
- m_SpecificCondCode(ISD::SETUGE)),
- m_Deferred(Y), m_Zero())) ||
- sd_match(N1, m_VSelect(m_SetCC(m_Specific(N0), m_Value(Y),
- m_SpecificCondCode(ISD::SETULT)),
- m_Zero(), m_Deferred(Y))) ||
- sd_match(N1, m_VSelect(m_SetCC(m_Specific(N0), m_Value(Y),
- m_SpecificCondCode(ISD::SETUGE)),
- m_Deferred(Y), m_Zero())))
+ auto MS0 = m_Specific(N0);
+ auto MVY = m_Value(Y);
+ auto MZ = m_Zero();
+ auto MCC1 = m_SpecificCondCode(ISD::SETULT);
+ auto MCC2 = m_SpecificCondCode(ISD::SETUGE);
+
+ if (sd_match(N1, m_SelectCCLike(MS0, MVY, MZ, m_Deferred(Y), MCC1)) ||
+ sd_match(N1, m_SelectCCLike(MS0, MVY, m_Deferred(Y), MZ, MCC2)) ||
+ sd_match(N1, m_VSelect(m_SetCC(MS0, MVY, MCC1), MZ, m_Deferred(Y))) ||
+ sd_match(N1, m_VSelect(m_SetCC(MS0, MVY, MCC2), m_Deferred(Y), MZ)))
return DAG.getNode(ISD::UMIN, DL, VT, N0,
DAG.getNode(ISD::SUB, DL, VT, N0, Y));
}
@@ -10616,6 +10622,19 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
return DAG.getVScale(DL, VT, C0 << C1);
}
+ SDValue X;
+ APInt VS0;
+
+ // fold (shl (X * vscale(VS0)), C1) -> (X * vscale(VS0 << C1))
+ if (N1C && sd_match(N0, m_Mul(m_Value(X), m_VScale(m_ConstInt(VS0))))) {
+ SDNodeFlags Flags;
+ Flags.setNoUnsignedWrap(N->getFlags().hasNoUnsignedWrap() &&
+ N0->getFlags().hasNoUnsignedWrap());
+
+ SDValue VScale = DAG.getVScale(DL, VT, VS0 << N1C->getAPIntValue());
+ return DAG.getNode(ISD::MUL, DL, VT, X, VScale, Flags);
+ }
+
// Fold (shl step_vector(C0), C1) to (step_vector(C0 << C1)).
APInt ShlVal;
if (N0.getOpcode() == ISD::STEP_VECTOR &&
@@ -25282,6 +25301,28 @@ static SDValue combineConcatVectorOfShuffleAndItsOperands(
return DAG.getVectorShuffle(VT, dl, ShufOps[0], ShufOps[1], Mask);
}
+static SDValue combineConcatVectorOfSplats(SDNode *N, SelectionDAG &DAG,
+ const TargetLowering &TLI,
+ bool LegalTypes,
+ bool LegalOperations) {
+ EVT VT = N->getValueType(0);
+
+ // Post-legalization we can only create wider SPLAT_VECTOR operations if
+ // both the type and the operation are legal. The Hexagon target has custom
+ // legalization for SPLAT_VECTOR that splits the operation into two parts and
+ // concatenates them. Therefore, custom lowering must also be rejected in
+ // order to avoid an infinite loop.
+ if ((LegalTypes && !TLI.isTypeLegal(VT)) ||
+ (LegalOperations && !TLI.isOperationLegal(ISD::SPLAT_VECTOR, VT)))
+ return SDValue();
+
+ SDValue Op0 = N->getOperand(0);
+ if (!llvm::all_equal(N->op_values()) || Op0.getOpcode() != ISD::SPLAT_VECTOR)
+ return SDValue();
+
+ return DAG.getNode(ISD::SPLAT_VECTOR, SDLoc(N), VT, Op0.getOperand(0));
+}
+
SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// If we only have one input vector, we don't need to do any concatenation.
if (N->getNumOperands() == 1)
@@ -25405,6 +25446,10 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
return DAG.getBuildVector(VT, SDLoc(N), Opnds);
}
+ if (SDValue V =
+ combineConcatVectorOfSplats(N, DAG, TLI, LegalTypes, LegalOperations))
+ return V;
+
// Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
// FIXME: Add support for concat_vectors(bitcast(vec0),bitcast(vec1),...).
if (SDValue V = combineConcatVectorOfScalars(N, DAG))
@@ -25473,6 +25518,21 @@ SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitVECTOR_INTERLEAVE(SDNode *N) {
+ // Check to see if all operands are identical.
+ if (!llvm::all_equal(N->op_values()))
+ return SDValue();
+
+ // Check to see if the identical operand is a splat.
+ if (!DAG.isSplatValue(N->getOperand(0)))
+ return SDValue();
+
+ // interleave splat(X), splat(X).... --> splat(X), splat(X)....
+ SmallVector<SDValue, 4> Ops;
+ Ops.append(N->op_values().begin(), N->op_values().end());
+ return CombineTo(N, &Ops);
+}
+
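
The interleave fold above relies on a simple identity: interleaving vectors
whose lanes all hold the same value produces vectors whose lanes still hold
that value, so when every operand is the identical splat the interleave's
results are just its operands. For example, vector_interleave(splat(7),
splat(7)) produces two vectors of all 7s, which is why CombineTo can replace
the node's results with the operands unchanged.
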
// Helper that peeks through INSERT_SUBVECTOR/CONCAT_VECTORS to find
// if the subvector can be sourced for free.
static SDValue getSubVectorSrc(SDValue V, unsigned Index, EVT SubVT) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index 6a2e782..31e7855 100644
--- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -888,7 +888,8 @@ EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
}
if (MI->isCandidateForAdditionalCallInfo()) {
- if (DAG->getTarget().Options.EmitCallSiteInfo)
+ if (DAG->getTarget().Options.EmitCallSiteInfo ||
+ DAG->getTarget().Options.EmitCallGraphSection)
MF.addCallSiteInfo(MI, DAG->getCallSiteInfo(Node));
if (auto CalledGlobal = DAG->getCalledGlobal(Node))
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 02d1100..f41b6eb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -12782,7 +12782,7 @@ bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
return Seen;
}
-/// isOperand - Return true if this node is an operand of N.
+/// Return true if the referenced return value is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
return is_contained(N->op_values(), *this);
}
diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp
index a88c57f..5d720fb 100644
--- a/llvm/lib/CodeGen/TailDuplicator.cpp
+++ b/llvm/lib/CodeGen/TailDuplicator.cpp
@@ -604,12 +604,21 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
bool HasComputedGoto = false;
if (!TailBB.empty()) {
HasIndirectbr = TailBB.back().isIndirectBranch();
- HasComputedGoto = TailBB.terminatorIsComputedGoto();
+ HasComputedGoto = TailBB.terminatorIsComputedGotoWithSuccessors();
}
if (HasIndirectbr && PreRegAlloc)
MaxDuplicateCount = TailDupIndirectBranchSize;
+ // Allow a higher limit when the block ends in computed gotos and we are
+ // running after register allocation. NB: this basically unfactors computed
+ // gotos that were factored early on in the compilation process to speed up
+ // edge-based data flow. If we do not unfactor them again, code with many
+ // computed jumps in the source, such as interpreters, can be seriously
+ // pessimized. Therefore we do not restrict computed gotos.
+ if (HasComputedGoto && !PreRegAlloc)
+ MaxDuplicateCount = std::max(MaxDuplicateCount, 10u);
+
// Check the instructions in the block to determine whether tail-duplication
// is invalid or unlikely to be profitable.
unsigned InstrCount = 0;
@@ -663,12 +672,7 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
// Duplicating a BB which has both multiple predecessors and successors may
// cause a huge amount of PHI nodes. If we want to remove this limitation,
// we have to address https://github.com/llvm/llvm-project/issues/78578.
- // NB. This basically unfactors computed gotos that were factored early on in
- // the compilation process to speed up edge based data flow. If we do not
- // unfactor them again, it can seriously pessimize code with many computed
- // jumps in the source code, such as interpreters. Therefore we do not
- // restrict the computed gotos.
- if (!HasComputedGoto && TailBB.pred_size() > TailDupPredSize &&
+ if (PreRegAlloc && TailBB.pred_size() > TailDupPredSize &&
TailBB.succ_size() > TailDupSuccSize) {
// If TailBB or any of its successors contains a phi, we may have to add a
// large number of additional phis with additional incoming values.
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 18d6bbc..705e046e 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -1406,7 +1406,7 @@ void TargetInstrInfo::reassociateOps(
const MCInstrDesc &MCID, Register DestReg) {
return MachineInstrBuilder(
MF, MF.CreateMachineInstr(MCID, MIMD.getDL(), /*NoImpl=*/true))
- .setPCSections(MIMD.getPCSections())
+ .copyMIMetadata(MIMD)
.addReg(DestReg, RegState::Define);
};
diff --git a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
index 222dc88..6ddb12b 100644
--- a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
+++ b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
@@ -413,6 +413,117 @@ static bool isTlsAddressCode(uint8_t DW_OP_Code) {
DW_OP_Code == dwarf::DW_OP_GNU_push_tls_address;
}
+static void constructSeqOffsettoOrigRowMapping(
+ CompileUnit &Unit, const DWARFDebugLine::LineTable &LT,
+ DenseMap<uint64_t, unsigned> &SeqOffToOrigRow) {
+
+ // Use std::map for ordered iteration.
+ std::map<uint64_t, unsigned> LineTableMapping;
+
+ // First, trust the sequences that the DWARF parser did identify.
+ for (const DWARFDebugLine::Sequence &Seq : LT.Sequences)
+ LineTableMapping[Seq.StmtSeqOffset] = Seq.FirstRowIndex;
+
+ // Second, manually find sequence boundaries and match them to the
+ // sorted attributes to handle sequences the parser might have missed.
+ auto StmtAttrs = Unit.getStmtSeqListAttributes();
+ llvm::sort(StmtAttrs, [](const PatchLocation &A, const PatchLocation &B) {
+ return A.get() < B.get();
+ });
+
+ std::vector<unsigned> SeqStartRows;
+ SeqStartRows.push_back(0);
+ for (auto [I, Row] : llvm::enumerate(ArrayRef(LT.Rows).drop_back()))
+ if (Row.EndSequence)
+ SeqStartRows.push_back(I + 1);
+
+ // While the LineTableMapping parsed from the CU is the ground truth,
+ // e.g.
+ //
+ // SeqOff Row
+ // 0x08 9
+ // 0x14 15
+ //
+ // the StmtAttrs and SeqStartRows may not match it perfectly, e.g.
+ //
+ // StmtAttrs SeqStartRows
+ // 0x04 3
+ // 0x08 5
+ // 0x10 9
+ // 0x12 11
+ // 0x14 15
+ //
+ // In this case, we don't want to assign 5 to 0x08, since we know 0x08
+ // maps to 9. If we did a dummy 1:1 mapping, 0x10 would be mapped to 9,
+ // which is incorrect. The expected behavior is to ignore 5 and realign
+ // the table based on the result from the line table:
+ //
+ // StmtAttrs SeqStartRows
+ // 0x04 3
+ // -- 5
+ // 0x08 9 <- LineTableMapping ground truth
+ // 0x10 11
+ // 0x12 --
+ // 0x14 15 <- LineTableMapping ground truth
+
+ ArrayRef StmtAttrsRef(StmtAttrs);
+ ArrayRef SeqStartRowsRef(SeqStartRows);
+
+ // Dummy last element to make sure StmtAttrsRef and SeqStartRowsRef always
+ // run out first.
+ constexpr uint64_t DummyKey = UINT64_MAX;
+ constexpr unsigned DummyVal = UINT32_MAX;
+ LineTableMapping[DummyKey] = DummyVal;
+
+ for (auto [NextSeqOff, NextRow] : LineTableMapping) {
+ // Explicit capture to avoid capturing structured bindings and to keep
+ // C++17 happy.
+ auto StmtAttrSmallerThanNext = [N = NextSeqOff](const PatchLocation &SA) {
+ return SA.get() < N;
+ };
+ auto SeqStartSmallerThanNext = [N = NextRow](const unsigned &Row) {
+ return Row < N;
+ };
+ // While both StmtAttrs and SeqStartRows point to values not yet in
+ // the LineTableMapping, do a dummy one-to-one mapping and advance
+ // both pointers.
+ while (!StmtAttrsRef.empty() && !SeqStartRowsRef.empty() &&
+ StmtAttrSmallerThanNext(StmtAttrsRef.front()) &&
+ SeqStartSmallerThanNext(SeqStartRowsRef.front())) {
+ SeqOffToOrigRow[StmtAttrsRef.consume_front().get()] =
+ SeqStartRowsRef.consume_front();
+ }
+ // One of the pointers now points to a value at or past Next in the
+ // LineTableMapping; move the pointers to re-align with the
+ // LineTableMapping.
+ StmtAttrsRef = StmtAttrsRef.drop_while(StmtAttrSmallerThanNext);
+ SeqStartRowsRef = SeqStartRowsRef.drop_while(SeqStartSmallerThanNext);
+ // Use the LineTableMapping's result as the ground truth and move
+ // on.
+ if (NextSeqOff != DummyKey) {
+ SeqOffToOrigRow[NextSeqOff] = NextRow;
+ }
+ // Advance the pointers if they point at Next. It is possible that they
+ // point to later entries in the LineTableMapping, so we only increment a
+ // pointer after validating that it points at the `Next` entry. E.g.
+ //
+ // LineTableMapping
+ // SeqOff Row
+ // 0x08 9 <- NextSeqOff/NextRow
+ // 0x14 15
+ //
+ // StmtAttrs SeqStartRows
+ // 0x14 13 <- StmtAttrsRef.front() / SeqStartRowsRef.front()
+ // 0x16 15
+ // -- 17
+ if (!StmtAttrsRef.empty() && StmtAttrsRef.front().get() == NextSeqOff)
+ StmtAttrsRef.consume_front();
+ if (!SeqStartRowsRef.empty() && SeqStartRowsRef.front() == NextRow)
+ SeqStartRowsRef.consume_front();
+ }
+}
+
std::pair<bool, std::optional<int64_t>>
DWARFLinker::getVariableRelocAdjustment(AddressesMap &RelocMgr,
const DWARFDie &DIE) {
@@ -2297,8 +2408,12 @@ void DWARFLinker::DIECloner::generateLineTableForUnit(CompileUnit &Unit) {
// Create a map of stmt sequence offsets to original row indices.
DenseMap<uint64_t, unsigned> SeqOffToOrigRow;
- for (const DWARFDebugLine::Sequence &Seq : LT->Sequences)
- SeqOffToOrigRow[Seq.StmtSeqOffset] = Seq.FirstRowIndex;
+ // The DWARF parser's discovery of sequences can be incomplete. To
+ // ensure all DW_AT_LLVM_stmt_sequence attributes can be patched, we
+ // build a map from both the parser's results and a manual
+ // reconstruction.
+ if (!LT->Rows.empty())
+ constructSeqOffsettoOrigRowMapping(Unit, *LT, SeqOffToOrigRow);
// Create a map of original row indices to new row indices.
DenseMap<size_t, size_t> OrigRowToNewRow;
diff --git a/llvm/lib/Frontend/HLSL/CMakeLists.txt b/llvm/lib/Frontend/HLSL/CMakeLists.txt
index 5343469..3d22577 100644
--- a/llvm/lib/Frontend/HLSL/CMakeLists.txt
+++ b/llvm/lib/Frontend/HLSL/CMakeLists.txt
@@ -1,5 +1,6 @@
add_llvm_component_library(LLVMFrontendHLSL
CBuffer.cpp
+ HLSLBinding.cpp
HLSLResource.cpp
HLSLRootSignature.cpp
RootSignatureMetadata.cpp
diff --git a/llvm/lib/Frontend/HLSL/HLSLBinding.cpp b/llvm/lib/Frontend/HLSL/HLSLBinding.cpp
new file mode 100644
index 0000000..d581311
--- /dev/null
+++ b/llvm/lib/Frontend/HLSL/HLSLBinding.cpp
@@ -0,0 +1,142 @@
+//===- HLSLBinding.cpp - Representation for resource bindings in HLSL -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Frontend/HLSL/HLSLBinding.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace llvm;
+using namespace hlsl;
+
+std::optional<uint32_t>
+BindingInfo::findAvailableBinding(dxil::ResourceClass RC, uint32_t Space,
+ int32_t Size) {
+ BindingSpaces &BS = getBindingSpaces(RC);
+ RegisterSpace &RS = BS.getOrInsertSpace(Space);
+ return RS.findAvailableBinding(Size);
+}
+
+BindingInfo::RegisterSpace &
+BindingInfo::BindingSpaces::getOrInsertSpace(uint32_t Space) {
+ for (auto It = Spaces.begin(), End = Spaces.end(); It != End; ++It) {
+ if (It->Space == Space)
+ return *It;
+ if (It->Space < Space)
+ continue;
+ return *Spaces.insert(It, Space);
+ }
+ return Spaces.emplace_back(Space);
+}
+
+std::optional<uint32_t>
+BindingInfo::RegisterSpace::findAvailableBinding(int32_t Size) {
+ assert((Size == -1 || Size > 0) && "invalid size");
+
+ if (FreeRanges.empty())
+ return std::nullopt;
+
+ // unbounded array
+ if (Size == -1) {
+ BindingRange &Last = FreeRanges.back();
+ if (Last.UpperBound != ~0u)
+ // this space is already occupied by an unbounded array
+ return std::nullopt;
+ uint32_t RegSlot = Last.LowerBound;
+ FreeRanges.pop_back();
+ return RegSlot;
+ }
+
+ // single resource or fixed-size array
+ for (BindingRange &R : FreeRanges) {
+ // compare the size as uint64_t to prevent overflow for range (0, ~0u)
+ if ((uint64_t)R.UpperBound - R.LowerBound + 1 < (uint64_t)Size)
+ continue;
+ uint32_t RegSlot = R.LowerBound;
+ // This might create a range where (LowerBound == UpperBound + 1). When
+ // that happens, the next time this function is called the range will be
+ // skipped over by the check above (at this point Size is always > 0).
+ R.LowerBound += Size;
+ return RegSlot;
+ }
+
+ return std::nullopt;
+}
+
+BindingInfo BindingInfoBuilder::calculateBindingInfo(
+ llvm::function_ref<void(const BindingInfoBuilder &Builder,
+ const Binding &Overlapping)>
+ ReportOverlap) {
+ // sort all the collected bindings
+ llvm::stable_sort(Bindings);
+
+ // remove duplicates
+ Binding *NewEnd = llvm::unique(Bindings);
+ if (NewEnd != Bindings.end())
+ Bindings.erase(NewEnd);
+
+ BindingInfo Info;
+
+ // Go over the sorted bindings and build up lists of free register ranges
+ // for each binding type and used spaces. Bindings are sorted by resource
+ // class, space, and lower bound register slot.
+ BindingInfo::BindingSpaces *BS =
+ &Info.getBindingSpaces(dxil::ResourceClass::SRV);
+ for (const Binding &B : Bindings) {
+ if (BS->RC != B.RC)
+ // move to the next resource class spaces
+ BS = &Info.getBindingSpaces(B.RC);
+
+ BindingInfo::RegisterSpace *S = BS->Spaces.empty()
+ ? &BS->Spaces.emplace_back(B.Space)
+ : &BS->Spaces.back();
+ assert(S->Space <= B.Space && "bindings not sorted correctly?");
+ if (B.Space != S->Space)
+ // add new space
+ S = &BS->Spaces.emplace_back(B.Space);
+
+ // The space is full - there are no free slots left, or the rest of the
+ // slots are taken by an unbounded array. Report the overlapping to the
+ // caller.
+ if (S->FreeRanges.empty() || S->FreeRanges.back().UpperBound < ~0u) {
+ ReportOverlap(*this, B);
+ continue;
+ }
+ // adjust the last free range lower bound, split it in two, or remove it
+ BindingInfo::BindingRange &LastFreeRange = S->FreeRanges.back();
+ if (LastFreeRange.LowerBound == B.LowerBound) {
+ if (B.UpperBound < ~0u)
+ LastFreeRange.LowerBound = B.UpperBound + 1;
+ else
+ S->FreeRanges.pop_back();
+ } else if (LastFreeRange.LowerBound < B.LowerBound) {
+ LastFreeRange.UpperBound = B.LowerBound - 1;
+ if (B.UpperBound < ~0u)
+ S->FreeRanges.emplace_back(B.UpperBound + 1, ~0u);
+ } else {
+ // We don't have room here. Report the overlapping binding to the caller
+ // and mark any extra space this binding would use as unavailable.
+ ReportOverlap(*this, B);
+ if (B.UpperBound < ~0u)
+ LastFreeRange.LowerBound =
+ std::max(LastFreeRange.LowerBound, B.UpperBound + 1);
+ else
+ S->FreeRanges.pop_back();
+ }
+ }
+
+ return Info;
+}
+
+const BindingInfoBuilder::Binding &BindingInfoBuilder::findOverlapping(
+ const BindingInfoBuilder::Binding &ReportedBinding) const {
+ for (const BindingInfoBuilder::Binding &Other : Bindings)
+ if (ReportedBinding.LowerBound <= Other.UpperBound &&
+ Other.LowerBound <= ReportedBinding.UpperBound)
+ return Other;
+
+ llvm_unreachable("Searching for overlap for binding that does not overlap");
+}
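+
+// An illustrative usage sketch (not part of this patch), assuming the
+// trackBinding(...) interface declared alongside this file in
+// llvm/Frontend/HLSL/HLSLBinding.h:
+//
+//   BindingInfoBuilder Builder;
+//   Builder.trackBinding(dxil::ResourceClass::SRV, /*Space=*/0,
+//                        /*LowerBound=*/0, /*UpperBound=*/3, nullptr);
+//   Builder.trackBinding(dxil::ResourceClass::SRV, /*Space=*/0,
+//                        /*LowerBound=*/2, /*UpperBound=*/5, nullptr);
+//   BindingInfo Info = Builder.calculateBindingInfo(
+//       [](const BindingInfoBuilder &B,
+//          const BindingInfoBuilder::Binding &Overlapping) {
+//         // called here for the second SRV range, which overlaps the first
+//       });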
diff --git a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
index 53f5934..48ff1ca 100644
--- a/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
+++ b/llvm/lib/Frontend/HLSL/RootSignatureMetadata.cpp
@@ -13,15 +13,21 @@
#include "llvm/Frontend/HLSL/RootSignatureMetadata.h"
#include "llvm/Frontend/HLSL/RootSignatureValidations.h"
-#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/ScopedPrinter.h"
+using namespace llvm;
+
namespace llvm {
namespace hlsl {
namespace rootsig {
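+// Out-of-line definitions that anchor LLVM's ErrorInfo RTTI: make_error<T>
+// and error-type dispatch compare dynamic class IDs against &T::ID, so each
+// concrete error class declared in the header needs exactly one ID
+// definition here.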
+char GenericRSMetadataError::ID;
+char InvalidRSMetadataFormat::ID;
+char InvalidRSMetadataValue::ID;
+template <typename T> char RootSignatureValidationError<T>::ID;
+
static std::optional<uint32_t> extractMdIntValue(MDNode *Node,
unsigned int OpId) {
if (auto *CI =
@@ -45,19 +51,6 @@ static std::optional<StringRef> extractMdStringValue(MDNode *Node,
return NodeText->getString();
}
-static bool reportError(LLVMContext *Ctx, Twine Message,
- DiagnosticSeverity Severity = DS_Error) {
- Ctx->diagnose(DiagnosticInfoGeneric(Message, Severity));
- return true;
-}
-
-static bool reportValueError(LLVMContext *Ctx, Twine ParamName,
- uint32_t Value) {
- Ctx->diagnose(DiagnosticInfoGeneric(
- "Invalid value for " + ParamName + ": " + Twine(Value), DS_Error));
- return true;
-}
-
static const EnumEntry<dxil::ResourceClass> ResourceClassNames[] = {
{"CBV", dxil::ResourceClass::CBuffer},
{"SRV", dxil::ResourceClass::SRV},
@@ -120,7 +113,7 @@ MDNode *MetadataBuilder::BuildRootFlags(const dxbc::RootFlags &Flags) {
IRBuilder<> Builder(Ctx);
Metadata *Operands[] = {
MDString::get(Ctx, "RootFlags"),
- ConstantAsMetadata::get(Builder.getInt32(llvm::to_underlying(Flags))),
+ ConstantAsMetadata::get(Builder.getInt32(to_underlying(Flags))),
};
return MDNode::get(Ctx, Operands);
}
@@ -130,7 +123,7 @@ MDNode *MetadataBuilder::BuildRootConstants(const RootConstants &Constants) {
Metadata *Operands[] = {
MDString::get(Ctx, "RootConstants"),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Constants.Visibility))),
+ Builder.getInt32(to_underlying(Constants.Visibility))),
ConstantAsMetadata::get(Builder.getInt32(Constants.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Constants.Space)),
ConstantAsMetadata::get(Builder.getInt32(Constants.Num32BitConstants)),
@@ -140,18 +133,18 @@ MDNode *MetadataBuilder::BuildRootConstants(const RootConstants &Constants) {
MDNode *MetadataBuilder::BuildRootDescriptor(const RootDescriptor &Descriptor) {
IRBuilder<> Builder(Ctx);
- std::optional<StringRef> ResName = getResourceName(
- dxil::ResourceClass(llvm::to_underlying(Descriptor.Type)));
+ std::optional<StringRef> ResName =
+ getResourceName(dxil::ResourceClass(to_underlying(Descriptor.Type)));
assert(ResName && "Provided an invalid Resource Class");
- llvm::SmallString<7> Name({"Root", *ResName});
+ SmallString<7> Name({"Root", *ResName});
Metadata *Operands[] = {
MDString::get(Ctx, Name),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Descriptor.Visibility))),
+ Builder.getInt32(to_underlying(Descriptor.Visibility))),
ConstantAsMetadata::get(Builder.getInt32(Descriptor.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Descriptor.Space)),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Descriptor.Flags))),
+ Builder.getInt32(to_underlying(Descriptor.Flags))),
};
return MDNode::get(Ctx, Operands);
}
@@ -162,7 +155,7 @@ MDNode *MetadataBuilder::BuildDescriptorTable(const DescriptorTable &Table) {
// Set the mandatory arguments
TableOperands.push_back(MDString::get(Ctx, "DescriptorTable"));
TableOperands.push_back(ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Table.Visibility))));
+ Builder.getInt32(to_underlying(Table.Visibility))));
// Remaining operands are references to the table's clauses. The in-memory
// representation of the Root Elements created from parsing will ensure that
@@ -182,7 +175,7 @@ MDNode *MetadataBuilder::BuildDescriptorTableClause(
const DescriptorTableClause &Clause) {
IRBuilder<> Builder(Ctx);
std::optional<StringRef> ResName =
- getResourceName(dxil::ResourceClass(llvm::to_underlying(Clause.Type)));
+ getResourceName(dxil::ResourceClass(to_underlying(Clause.Type)));
assert(ResName && "Provided an invalid Resource Class");
Metadata *Operands[] = {
MDString::get(Ctx, *ResName),
@@ -190,8 +183,7 @@ MDNode *MetadataBuilder::BuildDescriptorTableClause(
ConstantAsMetadata::get(Builder.getInt32(Clause.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Clause.Space)),
ConstantAsMetadata::get(Builder.getInt32(Clause.Offset)),
- ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Clause.Flags))),
+ ConstantAsMetadata::get(Builder.getInt32(to_underlying(Clause.Flags))),
};
return MDNode::get(Ctx, Operands);
}
@@ -200,108 +192,102 @@ MDNode *MetadataBuilder::BuildStaticSampler(const StaticSampler &Sampler) {
IRBuilder<> Builder(Ctx);
Metadata *Operands[] = {
MDString::get(Ctx, "StaticSampler"),
+ ConstantAsMetadata::get(Builder.getInt32(to_underlying(Sampler.Filter))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.Filter))),
+ Builder.getInt32(to_underlying(Sampler.AddressU))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.AddressU))),
+ Builder.getInt32(to_underlying(Sampler.AddressV))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.AddressV))),
+ Builder.getInt32(to_underlying(Sampler.AddressW))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.AddressW))),
- ConstantAsMetadata::get(llvm::ConstantFP::get(llvm::Type::getFloatTy(Ctx),
- Sampler.MipLODBias)),
+ ConstantFP::get(Type::getFloatTy(Ctx), Sampler.MipLODBias)),
ConstantAsMetadata::get(Builder.getInt32(Sampler.MaxAnisotropy)),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.CompFunc))),
+ Builder.getInt32(to_underlying(Sampler.CompFunc))),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.BorderColor))),
+ Builder.getInt32(to_underlying(Sampler.BorderColor))),
ConstantAsMetadata::get(
- llvm::ConstantFP::get(llvm::Type::getFloatTy(Ctx), Sampler.MinLOD)),
+ ConstantFP::get(Type::getFloatTy(Ctx), Sampler.MinLOD)),
ConstantAsMetadata::get(
- llvm::ConstantFP::get(llvm::Type::getFloatTy(Ctx), Sampler.MaxLOD)),
+ ConstantFP::get(Type::getFloatTy(Ctx), Sampler.MaxLOD)),
ConstantAsMetadata::get(Builder.getInt32(Sampler.Reg.Number)),
ConstantAsMetadata::get(Builder.getInt32(Sampler.Space)),
ConstantAsMetadata::get(
- Builder.getInt32(llvm::to_underlying(Sampler.Visibility))),
+ Builder.getInt32(to_underlying(Sampler.Visibility))),
};
return MDNode::get(Ctx, Operands);
}
-bool MetadataParser::parseRootFlags(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootFlagNode) {
-
+Error MetadataParser::parseRootFlags(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootFlagNode) {
if (RootFlagNode->getNumOperands() != 2)
- return reportError(Ctx, "Invalid format for RootFlag Element");
+ return make_error<InvalidRSMetadataFormat>("RootFlag Element");
if (std::optional<uint32_t> Val = extractMdIntValue(RootFlagNode, 1))
RSD.Flags = *Val;
else
- return reportError(Ctx, "Invalid value for RootFlag");
+ return make_error<InvalidRSMetadataValue>("RootFlag");
- return false;
+ return Error::success();
}
-bool MetadataParser::parseRootConstants(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootConstantNode) {
-
+Error MetadataParser::parseRootConstants(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *RootConstantNode) {
if (RootConstantNode->getNumOperands() != 5)
- return reportError(Ctx, "Invalid format for RootConstants Element");
+ return make_error<InvalidRSMetadataFormat>("RootConstants Element");
dxbc::RTS0::v1::RootParameterHeader Header;
// The parameter offset doesn't matter here - we recalculate it during
// serialization.
Header.ParameterOffset = 0;
- Header.ParameterType =
- llvm::to_underlying(dxbc::RootParameterType::Constants32Bit);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::Constants32Bit);
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 1))
Header.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
dxbc::RTS0::v1::RootConstants Constants;
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 2))
Constants.ShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderRegister");
+ return make_error<InvalidRSMetadataValue>("ShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 3))
Constants.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (std::optional<uint32_t> Val = extractMdIntValue(RootConstantNode, 4))
Constants.Num32BitValues = *Val;
else
- return reportError(Ctx, "Invalid value for Num32BitValues");
+ return make_error<InvalidRSMetadataValue>("Num32BitValues");
RSD.ParametersContainer.addParameter(Header, Constants);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseRootDescriptors(
- LLVMContext *Ctx, mcdxbc::RootSignatureDesc &RSD,
- MDNode *RootDescriptorNode, RootSignatureElementKind ElementKind) {
+Error MetadataParser::parseRootDescriptors(
+ mcdxbc::RootSignatureDesc &RSD, MDNode *RootDescriptorNode,
+ RootSignatureElementKind ElementKind) {
assert(ElementKind == RootSignatureElementKind::SRV ||
ElementKind == RootSignatureElementKind::UAV ||
ElementKind == RootSignatureElementKind::CBV &&
"parseRootDescriptors should only be called with RootDescriptor "
"element kind.");
if (RootDescriptorNode->getNumOperands() != 5)
- return reportError(Ctx, "Invalid format for Root Descriptor Element");
+ return make_error<InvalidRSMetadataFormat>("Root Descriptor Element");
dxbc::RTS0::v1::RootParameterHeader Header;
switch (ElementKind) {
case RootSignatureElementKind::SRV:
- Header.ParameterType = llvm::to_underlying(dxbc::RootParameterType::SRV);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::SRV);
break;
case RootSignatureElementKind::UAV:
- Header.ParameterType = llvm::to_underlying(dxbc::RootParameterType::UAV);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::UAV);
break;
case RootSignatureElementKind::CBV:
- Header.ParameterType = llvm::to_underlying(dxbc::RootParameterType::CBV);
+ Header.ParameterType = to_underlying(dxbc::RootParameterType::CBV);
break;
default:
llvm_unreachable("invalid Root Descriptor kind");
@@ -311,40 +297,38 @@ bool MetadataParser::parseRootDescriptors(
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 1))
Header.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
dxbc::RTS0::v2::RootDescriptor Descriptor;
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 2))
Descriptor.ShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderRegister");
+ return make_error<InvalidRSMetadataValue>("ShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 3))
Descriptor.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (RSD.Version == 1) {
RSD.ParametersContainer.addParameter(Header, Descriptor);
- return false;
+ return Error::success();
}
assert(RSD.Version > 1);
if (std::optional<uint32_t> Val = extractMdIntValue(RootDescriptorNode, 4))
Descriptor.Flags = *Val;
else
- return reportError(Ctx, "Invalid value for Root Descriptor Flags");
+ return make_error<InvalidRSMetadataValue>("Root Descriptor Flags");
RSD.ParametersContainer.addParameter(Header, Descriptor);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseDescriptorRange(LLVMContext *Ctx,
- mcdxbc::DescriptorTable &Table,
- MDNode *RangeDescriptorNode) {
-
+Error MetadataParser::parseDescriptorRange(mcdxbc::DescriptorTable &Table,
+ MDNode *RangeDescriptorNode) {
if (RangeDescriptorNode->getNumOperands() != 6)
- return reportError(Ctx, "Invalid format for Descriptor Range");
+ return make_error<InvalidRSMetadataFormat>("Descriptor Range");
dxbc::RTS0::v2::DescriptorRange Range;
@@ -352,162 +336,161 @@ bool MetadataParser::parseDescriptorRange(LLVMContext *Ctx,
extractMdStringValue(RangeDescriptorNode, 0);
if (!ElementText.has_value())
- return reportError(Ctx, "Descriptor Range, first element is not a string.");
+ return make_error<InvalidRSMetadataFormat>("Descriptor Range");
Range.RangeType =
StringSwitch<uint32_t>(*ElementText)
- .Case("CBV", llvm::to_underlying(dxbc::DescriptorRangeType::CBV))
- .Case("SRV", llvm::to_underlying(dxbc::DescriptorRangeType::SRV))
- .Case("UAV", llvm::to_underlying(dxbc::DescriptorRangeType::UAV))
- .Case("Sampler",
- llvm::to_underlying(dxbc::DescriptorRangeType::Sampler))
+ .Case("CBV", to_underlying(dxbc::DescriptorRangeType::CBV))
+ .Case("SRV", to_underlying(dxbc::DescriptorRangeType::SRV))
+ .Case("UAV", to_underlying(dxbc::DescriptorRangeType::UAV))
+ .Case("Sampler", to_underlying(dxbc::DescriptorRangeType::Sampler))
.Default(~0U);
if (Range.RangeType == ~0U)
- return reportError(Ctx, "Invalid Descriptor Range type: " + *ElementText);
+ return make_error<GenericRSMetadataError>("Invalid Descriptor Range type.",
+ RangeDescriptorNode);
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 1))
Range.NumDescriptors = *Val;
else
- return reportError(Ctx, "Invalid value for Number of Descriptor in Range");
+ return make_error<GenericRSMetadataError>("Number of Descriptor in Range",
+ RangeDescriptorNode);
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 2))
Range.BaseShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for BaseShaderRegister");
+ return make_error<InvalidRSMetadataValue>("BaseShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 3))
Range.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 4))
Range.OffsetInDescriptorsFromTableStart = *Val;
else
- return reportError(Ctx,
- "Invalid value for OffsetInDescriptorsFromTableStart");
+ return make_error<InvalidRSMetadataValue>(
+ "OffsetInDescriptorsFromTableStart");
if (std::optional<uint32_t> Val = extractMdIntValue(RangeDescriptorNode, 5))
Range.Flags = *Val;
else
- return reportError(Ctx, "Invalid value for Descriptor Range Flags");
+ return make_error<InvalidRSMetadataValue>("Descriptor Range Flags");
Table.Ranges.push_back(Range);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseDescriptorTable(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *DescriptorTableNode) {
+Error MetadataParser::parseDescriptorTable(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *DescriptorTableNode) {
const unsigned int NumOperands = DescriptorTableNode->getNumOperands();
if (NumOperands < 2)
- return reportError(Ctx, "Invalid format for Descriptor Table");
+ return make_error<InvalidRSMetadataFormat>("Descriptor Table");
dxbc::RTS0::v1::RootParameterHeader Header;
if (std::optional<uint32_t> Val = extractMdIntValue(DescriptorTableNode, 1))
Header.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
mcdxbc::DescriptorTable Table;
Header.ParameterType =
- llvm::to_underlying(dxbc::RootParameterType::DescriptorTable);
+ to_underlying(dxbc::RootParameterType::DescriptorTable);
for (unsigned int I = 2; I < NumOperands; I++) {
MDNode *Element = dyn_cast<MDNode>(DescriptorTableNode->getOperand(I));
if (Element == nullptr)
- return reportError(Ctx, "Missing Root Element Metadata Node.");
+ return make_error<GenericRSMetadataError>(
+ "Missing Root Element Metadata Node.", DescriptorTableNode);
- if (parseDescriptorRange(Ctx, Table, Element))
- return true;
+ if (auto Err = parseDescriptorRange(Table, Element))
+ return Err;
}
RSD.ParametersContainer.addParameter(Header, Table);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseStaticSampler(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *StaticSamplerNode) {
+Error MetadataParser::parseStaticSampler(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *StaticSamplerNode) {
if (StaticSamplerNode->getNumOperands() != 14)
- return reportError(Ctx, "Invalid format for Static Sampler");
+ return make_error<InvalidRSMetadataFormat>("Static Sampler");
dxbc::RTS0::v1::StaticSampler Sampler;
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 1))
Sampler.Filter = *Val;
else
- return reportError(Ctx, "Invalid value for Filter");
+ return make_error<InvalidRSMetadataValue>("Filter");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 2))
Sampler.AddressU = *Val;
else
- return reportError(Ctx, "Invalid value for AddressU");
+ return make_error<InvalidRSMetadataValue>("AddressU");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 3))
Sampler.AddressV = *Val;
else
- return reportError(Ctx, "Invalid value for AddressV");
+ return make_error<InvalidRSMetadataValue>("AddressV");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 4))
Sampler.AddressW = *Val;
else
- return reportError(Ctx, "Invalid value for AddressW");
+ return make_error<InvalidRSMetadataValue>("AddressW");
if (std::optional<float> Val = extractMdFloatValue(StaticSamplerNode, 5))
Sampler.MipLODBias = *Val;
else
- return reportError(Ctx, "Invalid value for MipLODBias");
+ return make_error<InvalidRSMetadataValue>("MipLODBias");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 6))
Sampler.MaxAnisotropy = *Val;
else
- return reportError(Ctx, "Invalid value for MaxAnisotropy");
+ return make_error<InvalidRSMetadataValue>("MaxAnisotropy");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 7))
Sampler.ComparisonFunc = *Val;
else
- return reportError(Ctx, "Invalid value for ComparisonFunc ");
+ return make_error<InvalidRSMetadataValue>("ComparisonFunc");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 8))
Sampler.BorderColor = *Val;
else
- return reportError(Ctx, "Invalid value for ComparisonFunc ");
+ return make_error<InvalidRSMetadataValue>("ComparisonFunc");
if (std::optional<float> Val = extractMdFloatValue(StaticSamplerNode, 9))
Sampler.MinLOD = *Val;
else
- return reportError(Ctx, "Invalid value for MinLOD");
+ return make_error<InvalidRSMetadataValue>("MinLOD");
if (std::optional<float> Val = extractMdFloatValue(StaticSamplerNode, 10))
Sampler.MaxLOD = *Val;
else
- return reportError(Ctx, "Invalid value for MaxLOD");
+ return make_error<InvalidRSMetadataValue>("MaxLOD");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 11))
Sampler.ShaderRegister = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderRegister");
+ return make_error<InvalidRSMetadataValue>("ShaderRegister");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 12))
Sampler.RegisterSpace = *Val;
else
- return reportError(Ctx, "Invalid value for RegisterSpace");
+ return make_error<InvalidRSMetadataValue>("RegisterSpace");
if (std::optional<uint32_t> Val = extractMdIntValue(StaticSamplerNode, 13))
Sampler.ShaderVisibility = *Val;
else
- return reportError(Ctx, "Invalid value for ShaderVisibility");
+ return make_error<InvalidRSMetadataValue>("ShaderVisibility");
RSD.StaticSamplers.push_back(Sampler);
- return false;
+ return Error::success();
}
-bool MetadataParser::parseRootSignatureElement(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD,
- MDNode *Element) {
+Error MetadataParser::parseRootSignatureElement(mcdxbc::RootSignatureDesc &RSD,
+ MDNode *Element) {
std::optional<StringRef> ElementText = extractMdStringValue(Element, 0);
if (!ElementText.has_value())
- return reportError(Ctx, "Invalid format for Root Element");
+ return make_error<InvalidRSMetadataFormat>("Root Element");
RootSignatureElementKind ElementKind =
StringSwitch<RootSignatureElementKind>(*ElementText)
@@ -523,79 +506,109 @@ bool MetadataParser::parseRootSignatureElement(LLVMContext *Ctx,
switch (ElementKind) {
case RootSignatureElementKind::RootFlags:
- return parseRootFlags(Ctx, RSD, Element);
+ return parseRootFlags(RSD, Element);
case RootSignatureElementKind::RootConstants:
- return parseRootConstants(Ctx, RSD, Element);
+ return parseRootConstants(RSD, Element);
case RootSignatureElementKind::CBV:
case RootSignatureElementKind::SRV:
case RootSignatureElementKind::UAV:
- return parseRootDescriptors(Ctx, RSD, Element, ElementKind);
+ return parseRootDescriptors(RSD, Element, ElementKind);
case RootSignatureElementKind::DescriptorTable:
- return parseDescriptorTable(Ctx, RSD, Element);
+ return parseDescriptorTable(RSD, Element);
case RootSignatureElementKind::StaticSamplers:
- return parseStaticSampler(Ctx, RSD, Element);
+ return parseStaticSampler(RSD, Element);
case RootSignatureElementKind::Error:
- return reportError(Ctx, "Invalid Root Signature Element: " + *ElementText);
+ return make_error<GenericRSMetadataError>("Invalid Root Signature Element",
+ Element);
}
llvm_unreachable("Unhandled RootSignatureElementKind enum.");
}
-bool MetadataParser::validateRootSignature(
- LLVMContext *Ctx, const llvm::mcdxbc::RootSignatureDesc &RSD) {
- if (!llvm::hlsl::rootsig::verifyVersion(RSD.Version)) {
- return reportValueError(Ctx, "Version", RSD.Version);
+Error MetadataParser::validateRootSignature(
+ const mcdxbc::RootSignatureDesc &RSD) {
+ Error DeferredErrs = Error::success();
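+ // DeferredErrs starts as the empty success Error; each joinErrors call
+ // below appends another failure to it, so validation reports every
+ // invalid field at once instead of stopping at the first.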
+ if (!hlsl::rootsig::verifyVersion(RSD.Version)) {
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "Version", RSD.Version));
}
- if (!llvm::hlsl::rootsig::verifyRootFlag(RSD.Flags)) {
- return reportValueError(Ctx, "RootFlags", RSD.Flags);
+ if (!hlsl::rootsig::verifyRootFlag(RSD.Flags)) {
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RootFlags", RSD.Flags));
}
for (const mcdxbc::RootParameterInfo &Info : RSD.ParametersContainer) {
if (!dxbc::isValidShaderVisibility(Info.Header.ShaderVisibility))
- return reportValueError(Ctx, "ShaderVisibility",
- Info.Header.ShaderVisibility);
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderVisibility", Info.Header.ShaderVisibility));
assert(dxbc::isValidParameterType(Info.Header.ParameterType) &&
"Invalid value for ParameterType");
switch (Info.Header.ParameterType) {
- case llvm::to_underlying(dxbc::RootParameterType::CBV):
- case llvm::to_underlying(dxbc::RootParameterType::UAV):
- case llvm::to_underlying(dxbc::RootParameterType::SRV): {
+ case to_underlying(dxbc::RootParameterType::CBV):
+ case to_underlying(dxbc::RootParameterType::UAV):
+ case to_underlying(dxbc::RootParameterType::SRV): {
const dxbc::RTS0::v2::RootDescriptor &Descriptor =
RSD.ParametersContainer.getRootDescriptor(Info.Location);
- if (!llvm::hlsl::rootsig::verifyRegisterValue(Descriptor.ShaderRegister))
- return reportValueError(Ctx, "ShaderRegister",
- Descriptor.ShaderRegister);
-
- if (!llvm::hlsl::rootsig::verifyRegisterSpace(Descriptor.RegisterSpace))
- return reportValueError(Ctx, "RegisterSpace", Descriptor.RegisterSpace);
+ if (!hlsl::rootsig::verifyRegisterValue(Descriptor.ShaderRegister))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderRegister", Descriptor.ShaderRegister));
+
+ if (!hlsl::rootsig::verifyRegisterSpace(Descriptor.RegisterSpace))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RegisterSpace", Descriptor.RegisterSpace));
if (RSD.Version > 1) {
- if (!llvm::hlsl::rootsig::verifyRootDescriptorFlag(RSD.Version,
- Descriptor.Flags))
- return reportValueError(Ctx, "RootDescriptorFlag", Descriptor.Flags);
+ if (!hlsl::rootsig::verifyRootDescriptorFlag(RSD.Version,
+ Descriptor.Flags))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RootDescriptorFlag", Descriptor.Flags));
}
break;
}
- case llvm::to_underlying(dxbc::RootParameterType::DescriptorTable): {
+ case to_underlying(dxbc::RootParameterType::DescriptorTable): {
const mcdxbc::DescriptorTable &Table =
RSD.ParametersContainer.getDescriptorTable(Info.Location);
for (const dxbc::RTS0::v2::DescriptorRange &Range : Table) {
- if (!llvm::hlsl::rootsig::verifyRangeType(Range.RangeType))
- return reportValueError(Ctx, "RangeType", Range.RangeType);
-
- if (!llvm::hlsl::rootsig::verifyRegisterSpace(Range.RegisterSpace))
- return reportValueError(Ctx, "RegisterSpace", Range.RegisterSpace);
-
- if (!llvm::hlsl::rootsig::verifyNumDescriptors(Range.NumDescriptors))
- return reportValueError(Ctx, "NumDescriptors", Range.NumDescriptors);
-
- if (!llvm::hlsl::rootsig::verifyDescriptorRangeFlag(
+ if (!hlsl::rootsig::verifyRangeType(Range.RangeType))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RangeType", Range.RangeType));
+
+ if (!hlsl::rootsig::verifyRegisterSpace(Range.RegisterSpace))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RegisterSpace", Range.RegisterSpace));
+
+ if (!hlsl::rootsig::verifyNumDescriptors(Range.NumDescriptors))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "NumDescriptors", Range.NumDescriptors));
+
+ if (!hlsl::rootsig::verifyDescriptorRangeFlag(
RSD.Version, Range.RangeType, Range.Flags))
- return reportValueError(Ctx, "DescriptorFlag", Range.Flags);
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "DescriptorFlag", Range.Flags));
}
break;
}
@@ -603,65 +616,108 @@ bool MetadataParser::validateRootSignature(
}
for (const dxbc::RTS0::v1::StaticSampler &Sampler : RSD.StaticSamplers) {
- if (!llvm::hlsl::rootsig::verifySamplerFilter(Sampler.Filter))
- return reportValueError(Ctx, "Filter", Sampler.Filter);
-
- if (!llvm::hlsl::rootsig::verifyAddress(Sampler.AddressU))
- return reportValueError(Ctx, "AddressU", Sampler.AddressU);
-
- if (!llvm::hlsl::rootsig::verifyAddress(Sampler.AddressV))
- return reportValueError(Ctx, "AddressV", Sampler.AddressV);
-
- if (!llvm::hlsl::rootsig::verifyAddress(Sampler.AddressW))
- return reportValueError(Ctx, "AddressW", Sampler.AddressW);
-
- if (!llvm::hlsl::rootsig::verifyMipLODBias(Sampler.MipLODBias))
- return reportValueError(Ctx, "MipLODBias", Sampler.MipLODBias);
-
- if (!llvm::hlsl::rootsig::verifyMaxAnisotropy(Sampler.MaxAnisotropy))
- return reportValueError(Ctx, "MaxAnisotropy", Sampler.MaxAnisotropy);
-
- if (!llvm::hlsl::rootsig::verifyComparisonFunc(Sampler.ComparisonFunc))
- return reportValueError(Ctx, "ComparisonFunc", Sampler.ComparisonFunc);
-
- if (!llvm::hlsl::rootsig::verifyBorderColor(Sampler.BorderColor))
- return reportValueError(Ctx, "BorderColor", Sampler.BorderColor);
-
- if (!llvm::hlsl::rootsig::verifyLOD(Sampler.MinLOD))
- return reportValueError(Ctx, "MinLOD", Sampler.MinLOD);
-
- if (!llvm::hlsl::rootsig::verifyLOD(Sampler.MaxLOD))
- return reportValueError(Ctx, "MaxLOD", Sampler.MaxLOD);
-
- if (!llvm::hlsl::rootsig::verifyRegisterValue(Sampler.ShaderRegister))
- return reportValueError(Ctx, "ShaderRegister", Sampler.ShaderRegister);
-
- if (!llvm::hlsl::rootsig::verifyRegisterSpace(Sampler.RegisterSpace))
- return reportValueError(Ctx, "RegisterSpace", Sampler.RegisterSpace);
+ if (!hlsl::rootsig::verifySamplerFilter(Sampler.Filter))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "Filter", Sampler.Filter));
+
+ if (!hlsl::rootsig::verifyAddress(Sampler.AddressU))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "AddressU", Sampler.AddressU));
+
+ if (!hlsl::rootsig::verifyAddress(Sampler.AddressV))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "AddressV", Sampler.AddressV));
+
+ if (!hlsl::rootsig::verifyAddress(Sampler.AddressW))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "AddressW", Sampler.AddressW));
+
+ if (!hlsl::rootsig::verifyMipLODBias(Sampler.MipLODBias))
+ DeferredErrs = joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<float>>(
+ "MipLODBias", Sampler.MipLODBias));
+
+ if (!hlsl::rootsig::verifyMaxAnisotropy(Sampler.MaxAnisotropy))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "MaxAnisotropy", Sampler.MaxAnisotropy));
+
+ if (!hlsl::rootsig::verifyComparisonFunc(Sampler.ComparisonFunc))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ComparisonFunc", Sampler.ComparisonFunc));
+
+ if (!hlsl::rootsig::verifyBorderColor(Sampler.BorderColor))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "BorderColor", Sampler.BorderColor));
+
+ if (!hlsl::rootsig::verifyLOD(Sampler.MinLOD))
+ DeferredErrs = joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<float>>(
+ "MinLOD", Sampler.MinLOD));
+
+ if (!hlsl::rootsig::verifyLOD(Sampler.MaxLOD))
+ DeferredErrs = joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<float>>(
+ "MaxLOD", Sampler.MaxLOD));
+
+ if (!hlsl::rootsig::verifyRegisterValue(Sampler.ShaderRegister))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderRegister", Sampler.ShaderRegister));
+
+ if (!hlsl::rootsig::verifyRegisterSpace(Sampler.RegisterSpace))
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "RegisterSpace", Sampler.RegisterSpace));
if (!dxbc::isValidShaderVisibility(Sampler.ShaderVisibility))
- return reportValueError(Ctx, "ShaderVisibility",
- Sampler.ShaderVisibility);
+ DeferredErrs =
+ joinErrors(std::move(DeferredErrs),
+ make_error<RootSignatureValidationError<uint32_t>>(
+ "ShaderVisibility", Sampler.ShaderVisibility));
}
- return false;
+ return DeferredErrs;
}
-bool MetadataParser::ParseRootSignature(LLVMContext *Ctx,
- mcdxbc::RootSignatureDesc &RSD) {
- bool HasError = false;
-
- // Loop through the Root Elements of the root signature.
+Expected<mcdxbc::RootSignatureDesc>
+MetadataParser::ParseRootSignature(uint32_t Version) {
+ Error DeferredErrs = Error::success();
+ mcdxbc::RootSignatureDesc RSD;
+ RSD.Version = Version;
for (const auto &Operand : Root->operands()) {
MDNode *Element = dyn_cast<MDNode>(Operand);
if (Element == nullptr)
- return reportError(Ctx, "Missing Root Element Metadata Node.");
+ return joinErrors(std::move(DeferredErrs),
+ make_error<GenericRSMetadataError>(
+ "Missing Root Element Metadata Node.", nullptr));
- HasError = HasError || parseRootSignatureElement(Ctx, RSD, Element) ||
- validateRootSignature(Ctx, RSD);
+ if (auto Err = parseRootSignatureElement(RSD, Element))
+ DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err));
}
- return HasError;
+ if (auto Err = validateRootSignature(RSD))
+ DeferredErrs = joinErrors(std::move(DeferredErrs), std::move(Err));
+
+ if (DeferredErrs)
+ return std::move(DeferredErrs);
+
+ return std::move(RSD);
}
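+
+// An illustrative caller-side sketch (not part of this patch), assuming the
+// parser is constructed from the root-elements MDNode as declared in the
+// header; toString() flattens any joined parse/validation errors:
+//
+//   MetadataParser MDParser(RootElementsNode);
+//   Expected<mcdxbc::RootSignatureDesc> RSDOrErr =
+//       MDParser.ParseRootSignature(/*Version=*/2);
+//   if (!RSDOrErr) {
+//     errs() << toString(RSDOrErr.takeError()) << "\n";
+//     return;
+//   }
+//   mcdxbc::RootSignatureDesc RSD = std::move(*RSDOrErr);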
} // namespace rootsig
} // namespace hlsl
diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp
index f16963d..f1d4549 100644
--- a/llvm/lib/IR/DebugInfoMetadata.cpp
+++ b/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -1012,7 +1012,7 @@ DIDerivedType *DIDerivedType::getImpl(
std::optional<DIDerivedType::PtrAuthData>
DIDerivedType::getPtrAuthData() const {
return getTag() == dwarf::DW_TAG_LLVM_ptrauth_type
- ? std::optional<PtrAuthData>(PtrAuthData(SubclassData32))
+ ? std::make_optional<PtrAuthData>(SubclassData32)
: std::nullopt;
}
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 7f07568..0323b4d 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -743,8 +743,9 @@ Error LTO::add(std::unique_ptr<InputFile> Input,
Conf.VisibilityScheme = Config::ELF;
}
+ ArrayRef<SymbolResolution> InputRes = Res;
for (unsigned I = 0; I != Input->Mods.size(); ++I) {
- if (auto Err = addModule(*Input, I, Res).moveInto(Res))
+ if (auto Err = addModule(*Input, InputRes, I, Res).moveInto(Res))
return Err;
}
@@ -753,8 +754,8 @@ Error LTO::add(std::unique_ptr<InputFile> Input,
}
Expected<ArrayRef<SymbolResolution>>
-LTO::addModule(InputFile &Input, unsigned ModI,
- ArrayRef<SymbolResolution> Res) {
+LTO::addModule(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ unsigned ModI, ArrayRef<SymbolResolution> Res) {
Expected<BitcodeLTOInfo> LTOInfo = Input.Mods[ModI].getLTOInfo();
if (!LTOInfo)
return LTOInfo.takeError();
@@ -791,7 +792,7 @@ LTO::addModule(InputFile &Input, unsigned ModI,
return addThinLTO(BM, ModSyms, Res);
RegularLTO.EmptyCombinedModule = false;
- auto ModOrErr = addRegularLTO(BM, ModSyms, Res);
+ auto ModOrErr = addRegularLTO(Input, InputRes, BM, ModSyms, Res);
if (!ModOrErr)
return ModOrErr.takeError();
Res = ModOrErr->second;
@@ -846,7 +847,8 @@ handleNonPrevailingComdat(GlobalValue &GV,
// linkRegularLTO.
Expected<
std::pair<LTO::RegularLTOState::AddedModule, ArrayRef<SymbolResolution>>>
-LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+LTO::addRegularLTO(InputFile &Input, ArrayRef<SymbolResolution> InputRes,
+ BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
ArrayRef<SymbolResolution> Res) {
RegularLTOState::AddedModule Mod;
Expected<std::unique_ptr<Module>> MOrErr =
@@ -860,13 +862,34 @@ LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
if (Error Err = M.materializeMetadata())
return std::move(Err);
- // If cfi.functions is present and we are in regular LTO mode, LowerTypeTests
- // will rename local functions in the merged module as "<function name>.1".
- // This causes linking errors, since other parts of the module expect the
- // original function name.
- if (LTOMode == LTOK_UnifiedRegular)
+ if (LTOMode == LTOK_UnifiedRegular) {
+ // cfi.functions metadata is intended to be used with ThinLTO and may
+ // trigger invalid IR transformations if it is present when doing regular
+ // LTO, so delete it.
if (NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions"))
M.eraseNamedMetadata(CfiFunctionsMD);
+ } else if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
+ // Delete aliases entries for non-prevailing symbols on the ThinLTO side of
+ // this input file.
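+ // Each operand of the "aliases" named metadata is a tuple of MDString
+ // symbol names; a group stays meaningful only while it still names an
+ // aliasee plus at least one alias, hence the Aliases.size() > 1 check
+ // below.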
+ DenseSet<StringRef> Prevailing;
+ for (auto [I, R] : zip(Input.symbols(), InputRes))
+ if (R.Prevailing && !I.getIRName().empty())
+ Prevailing.insert(I.getIRName());
+ std::vector<MDNode *> AliasGroups;
+ for (MDNode *AliasGroup : AliasesMD->operands()) {
+ std::vector<Metadata *> Aliases;
+ for (Metadata *Alias : AliasGroup->operands()) {
+ if (isa<MDString>(Alias) &&
+ Prevailing.count(cast<MDString>(Alias)->getString()))
+ Aliases.push_back(Alias);
+ }
+ if (Aliases.size() > 1)
+ AliasGroups.push_back(MDTuple::get(RegularLTO.Ctx, Aliases));
+ }
+ AliasesMD->clearOperands();
+ for (MDNode *G : AliasGroups)
+ AliasesMD->addOperand(G);
+ }
UpgradeDebugInfo(M);
diff --git a/llvm/lib/MC/MCObjectFileInfo.cpp b/llvm/lib/MC/MCObjectFileInfo.cpp
index 0069d12..393eed1 100644
--- a/llvm/lib/MC/MCObjectFileInfo.cpp
+++ b/llvm/lib/MC/MCObjectFileInfo.cpp
@@ -537,6 +537,8 @@ void MCObjectFileInfo::initELFMCObjectFileInfo(const Triple &T, bool Large) {
EHFrameSection =
Ctx->getELFSection(".eh_frame", EHSectionType, EHSectionFlags);
+ CallGraphSection = Ctx->getELFSection(".callgraph", ELF::SHT_PROGBITS, 0);
+
StackSizesSection = Ctx->getELFSection(".stack_sizes", ELF::SHT_PROGBITS, 0);
PseudoProbeSection = Ctx->getELFSection(".pseudo_probe", DebugSecType, 0);
@@ -1121,6 +1123,24 @@ MCSection *MCObjectFileInfo::getDwarfComdatSection(const char *Name,
}
MCSection *
+MCObjectFileInfo::getCallGraphSection(const MCSection &TextSec) const {
+ if (Ctx->getObjectFileType() != MCContext::IsELF)
+ return CallGraphSection;
+
+ const MCSectionELF &ElfSec = static_cast<const MCSectionELF &>(TextSec);
+ unsigned Flags = ELF::SHF_LINK_ORDER;
+ StringRef GroupName;
+ if (const MCSymbol *Group = ElfSec.getGroup()) {
+ GroupName = Group->getName();
+ Flags |= ELF::SHF_GROUP;
+ }
+
+ return Ctx->getELFSection(".callgraph", ELF::SHT_PROGBITS, Flags, 0,
+ GroupName, true, ElfSec.getUniqueID(),
+ cast<MCSymbolELF>(TextSec.getBeginSymbol()));
+}
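+
+// Note: SHF_LINK_ORDER ties each .callgraph section to its text section via
+// the begin symbol passed above, so the linker can discard the metadata
+// together with its text section (e.g. under --gc-sections), mirroring
+// getStackSizesSection() below.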
+
+MCSection *
MCObjectFileInfo::getStackSizesSection(const MCSection &TextSec) const {
if ((Ctx->getObjectFileType() != MCContext::IsELF) ||
Ctx->getTargetTriple().isPS4())
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index e82393a..e277143 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -46,83 +46,27 @@ MCAssembler *MCObjectStreamer::getAssemblerPtr() {
return nullptr;
}
-constexpr size_t FragBlockSize = 16384;
-// Ensure the new fragment can at least store a few bytes.
-constexpr size_t NewFragHeadroom = 8;
-
-static_assert(NewFragHeadroom >= alignof(MCFragment));
-static_assert(FragBlockSize >= sizeof(MCFragment) + NewFragHeadroom);
-
-MCFragment *MCObjectStreamer::allocFragSpace(size_t Headroom) {
- auto Size = std::max(FragBlockSize, sizeof(MCFragment) + Headroom);
- FragSpace = Size - sizeof(MCFragment);
- auto Chunk = std::unique_ptr<char[]>(new char[Size]);
- auto *F = reinterpret_cast<MCFragment *>(Chunk.get());
- FragStorage.push_back(std::move(Chunk));
- return F;
-}
-
void MCObjectStreamer::newFragment() {
- MCFragment *F;
- if (LLVM_LIKELY(sizeof(MCFragment) + NewFragHeadroom <= FragSpace)) {
- auto End = reinterpret_cast<size_t>(getCurFragEnd());
- F = reinterpret_cast<MCFragment *>(
- alignToPowerOf2(End, alignof(MCFragment)));
- FragSpace -= size_t(F) - End + sizeof(MCFragment);
- } else {
- F = allocFragSpace(0);
- }
- new (F) MCFragment();
- addFragment(F);
-}
-
-void MCObjectStreamer::ensureHeadroom(size_t Headroom) {
- if (Headroom <= FragSpace)
- return;
- auto *F = allocFragSpace(Headroom);
- new (F) MCFragment();
- addFragment(F);
+ addFragment(getContext().allocFragment<MCFragment>());
}
-void MCObjectStreamer::insert(MCFragment *Frag) {
- assert(Frag->getKind() != MCFragment::FT_Data &&
+void MCObjectStreamer::insert(MCFragment *F) {
+ assert(F->getKind() != MCFragment::FT_Data &&
"F should have a variable-size tail");
- // Frag is not connected to FragSpace. Before modifying CurFrag with
- // addFragment(Frag), allocate an empty fragment to maintain FragSpace
- // connectivity, potentially reusing CurFrag's associated space.
- MCFragment *F;
- if (LLVM_LIKELY(sizeof(MCFragment) + NewFragHeadroom <= FragSpace)) {
- auto End = reinterpret_cast<size_t>(getCurFragEnd());
- F = reinterpret_cast<MCFragment *>(
- alignToPowerOf2(End, alignof(MCFragment)));
- FragSpace -= size_t(F) - End + sizeof(MCFragment);
- } else {
- F = allocFragSpace(0);
- }
- new (F) MCFragment();
-
- addFragment(Frag);
addFragment(F);
+ newFragment();
}
void MCObjectStreamer::appendContents(ArrayRef<char> Contents) {
- ensureHeadroom(Contents.size());
- assert(FragSpace >= Contents.size());
- llvm::copy(Contents, getCurFragEnd());
- CurFrag->FixedSize += Contents.size();
- FragSpace -= Contents.size();
+ CurFrag->appendContents(Contents);
}
void MCObjectStreamer::appendContents(size_t Num, char Elt) {
- ensureHeadroom(Num);
- MutableArrayRef<char> Data(getCurFragEnd(), Num);
- llvm::fill(Data, Elt);
- CurFrag->FixedSize += Num;
- FragSpace -= Num;
+ CurFrag->appendContents(Num, Elt);
}
void MCObjectStreamer::addFixup(const MCExpr *Value, MCFixupKind Kind) {
- CurFrag->addFixup(MCFixup::create(getCurFragSize(), Value, Kind));
+ CurFrag->addFixup(MCFixup::create(CurFrag->getFixedSize(), Value, Kind));
}
// As a compile-time optimization, avoid allocating and evaluating an MCExpr
@@ -171,8 +115,6 @@ void MCObjectStreamer::reset() {
}
EmitEHFrame = true;
EmitDebugFrame = false;
- FragStorage.clear();
- FragSpace = 0;
MCStreamer::reset();
}
@@ -201,6 +143,7 @@ void MCObjectStreamer::emitCFISections(bool EH, bool Debug, bool SFrame) {
void MCObjectStreamer::emitValueImpl(const MCExpr *Value, unsigned Size,
SMLoc Loc) {
MCStreamer::emitValueImpl(Value, Size, Loc);
+ MCFragment *DF = getCurrentFragment();
MCDwarfLineEntry::make(this, getCurrentSectionOnly());
@@ -215,9 +158,9 @@ void MCObjectStreamer::emitValueImpl(const MCExpr *Value, unsigned Size,
emitIntValue(AbsValue, Size);
return;
}
- ensureHeadroom(Size);
- addFixup(Value, MCFixup::getDataKindForSize(Size));
- appendContents(Size, 0);
+ DF->addFixup(MCFixup::create(DF->getContents().size(), Value,
+ MCFixup::getDataKindForSize(Size)));
+ DF->appendContents(Size, 0);
}
MCSymbol *MCObjectStreamer::emitCFILabel() {
@@ -251,7 +194,7 @@ void MCObjectStreamer::emitLabel(MCSymbol *Symbol, SMLoc Loc) {
// section.
MCFragment *F = CurFrag;
Symbol->setFragment(F);
- Symbol->setOffset(F->getFixedSize());
+ Symbol->setOffset(F->getContents().size());
emitPendingAssignments(Symbol);
}
@@ -317,21 +260,6 @@ void MCObjectStreamer::changeSection(MCSection *Section, uint32_t Subsection) {
F0 = CurFrag;
}
- // To maintain connectivity between CurFrag and FragSpace when CurFrag is
- // modified, allocate an empty fragment and append it to the fragment list.
- // (Subsections[I].second.Tail is not connected to FragSpace.)
- MCFragment *F;
- if (LLVM_LIKELY(sizeof(MCFragment) + NewFragHeadroom <= FragSpace)) {
- auto End = reinterpret_cast<size_t>(getCurFragEnd());
- F = reinterpret_cast<MCFragment *>(
- alignToPowerOf2(End, alignof(MCFragment)));
- FragSpace -= size_t(F) - End + sizeof(MCFragment);
- } else {
- F = allocFragSpace(0);
- }
- new (F) MCFragment();
- F->setParent(Section);
-
auto &Subsections = Section->Subsections;
size_t I = 0, E = Subsections.size();
while (I != E && Subsections[I].first < Subsection)
@@ -339,16 +267,13 @@ void MCObjectStreamer::changeSection(MCSection *Section, uint32_t Subsection) {
// If the subsection number is not in the sorted Subsections list, create a
// new fragment list.
if (I == E || Subsections[I].first != Subsection) {
+ auto *F = getContext().allocFragment<MCFragment>();
+ F->setParent(Section);
Subsections.insert(Subsections.begin() + I,
{Subsection, MCSection::FragList{F, F}});
- Section->CurFragList = &Subsections[I].second;
- CurFrag = F;
- } else {
- Section->CurFragList = &Subsections[I].second;
- CurFrag = Subsections[I].second.Tail;
- // Ensure CurFrag is associated with FragSpace.
- addFragment(F);
}
+ Section->CurFragList = &Subsections[I].second;
+ CurFrag = Section->CurFragList->Tail;
// Define the section symbol at subsection 0's initial fragment if required.
if (!NewSec)
@@ -419,15 +344,11 @@ void MCObjectStreamer::emitInstToData(const MCInst &Inst,
MCFragment *F = getCurrentFragment();
// Append the instruction to the data fragment.
- size_t CodeOffset = getCurFragSize();
- SmallString<16> Content;
+ size_t CodeOffset = F->getContents().size();
SmallVector<MCFixup, 1> Fixups;
- getAssembler().getEmitter().encodeInstruction(Inst, Content, Fixups, STI);
- appendContents(Content);
- if (CurFrag != F) {
- F = CurFrag;
- CodeOffset = 0;
- }
+ getAssembler().getEmitter().encodeInstruction(
+ Inst, F->getContentsForAppending(), Fixups, STI);
+ F->doneAppending();
F->setHasInstructions(STI);
if (Fixups.empty())
diff --git a/llvm/lib/MC/MCWin64EH.cpp b/llvm/lib/MC/MCWin64EH.cpp
index a87648a..72a8dd7 100644
--- a/llvm/lib/MC/MCWin64EH.cpp
+++ b/llvm/lib/MC/MCWin64EH.cpp
@@ -318,9 +318,6 @@ static void EmitUnwindInfo(MCStreamer &streamer, WinEH::FrameInfo *info) {
// Emit the epilog instructions.
if (EnableUnwindV2) {
- // Ensure the fixups and appended content apply to the same fragment.
- OS->ensureHeadroom(info->EpilogMap.size() * 2);
-
bool IsLast = true;
for (const auto &Epilog : llvm::reverse(info->EpilogMap)) {
if (IsLast) {
diff --git a/llvm/lib/MC/MCWinCOFFStreamer.cpp b/llvm/lib/MC/MCWinCOFFStreamer.cpp
index 8be5054..1ffe25c 100644
--- a/llvm/lib/MC/MCWinCOFFStreamer.cpp
+++ b/llvm/lib/MC/MCWinCOFFStreamer.cpp
@@ -280,7 +280,6 @@ void MCWinCOFFStreamer::emitCOFFSymbolIndex(MCSymbol const *Symbol) {
void MCWinCOFFStreamer::emitCOFFSectionIndex(const MCSymbol *Symbol) {
visitUsedSymbol(*Symbol);
const MCSymbolRefExpr *SRE = MCSymbolRefExpr::create(Symbol, getContext());
- ensureHeadroom(2);
addFixup(SRE, FK_SecRel_2);
appendContents(2, 0);
}
@@ -294,7 +293,6 @@ void MCWinCOFFStreamer::emitCOFFSecRel32(const MCSymbol *Symbol,
if (Offset)
MCE = MCBinaryExpr::createAdd(
MCE, MCConstantExpr::create(Offset, getContext()), getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_SecRel_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
@@ -310,7 +308,6 @@ void MCWinCOFFStreamer::emitCOFFImgRel32(const MCSymbol *Symbol,
if (Offset)
MCE = MCBinaryExpr::createAdd(
MCE, MCConstantExpr::create(Offset, getContext()), getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_Data_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
@@ -321,7 +318,6 @@ void MCWinCOFFStreamer::emitCOFFSecNumber(MCSymbol const *Symbol) {
// Create Symbol for section number.
const MCExpr *MCE = MCCOFFSectionNumberTargetExpr::create(
*Symbol, this->getWriter(), getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_Data_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
@@ -332,7 +328,6 @@ void MCWinCOFFStreamer::emitCOFFSecOffset(MCSymbol const *Symbol) {
// Create Symbol for section offset.
const MCExpr *MCE =
MCCOFFSectionOffsetTargetExpr::create(*Symbol, getContext());
- ensureHeadroom(4);
addFixup(MCE, FK_Data_4);
// Emit 4 bytes (zeros) to the object file.
appendContents(4, 0);
diff --git a/llvm/lib/Object/ELFObjectFile.cpp b/llvm/lib/Object/ELFObjectFile.cpp
index 0919c6a..aff047c 100644
--- a/llvm/lib/Object/ELFObjectFile.cpp
+++ b/llvm/lib/Object/ELFObjectFile.cpp
@@ -688,11 +688,20 @@ StringRef ELFObjectFileBase::getNVPTXCPUName() const {
case ELF::EF_CUDA_SM100:
return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_100a"
: "sm_100";
+ case ELF::EF_CUDA_SM101:
+ return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_101a"
+ : "sm_101";
+ case ELF::EF_CUDA_SM103:
+ return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_103a"
+ : "sm_103";
// Rubin architecture.
case ELF::EF_CUDA_SM120:
return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_120a"
: "sm_120";
+ case ELF::EF_CUDA_SM121:
+ return getPlatformFlags() & ELF::EF_CUDA_ACCELERATORS ? "sm_121a"
+ : "sm_121";
default:
llvm_unreachable("Unknown EF_CUDA_SM value");
}
diff --git a/llvm/lib/ObjectYAML/ELFEmitter.cpp b/llvm/lib/ObjectYAML/ELFEmitter.cpp
index 6de87a8..bc5c68d 100644
--- a/llvm/lib/ObjectYAML/ELFEmitter.cpp
+++ b/llvm/lib/ObjectYAML/ELFEmitter.cpp
@@ -481,7 +481,11 @@ void ELFState<ELFT>::writeELFHeader(raw_ostream &OS) {
Header.e_version = EV_CURRENT;
Header.e_entry = Doc.Header.Entry;
- Header.e_flags = Doc.Header.Flags;
+ if (Doc.Header.Flags)
+ Header.e_flags = *Doc.Header.Flags;
+ else
+ Header.e_flags = 0;
+
Header.e_ehsize = sizeof(Elf_Ehdr);
if (Doc.Header.EPhOff)
diff --git a/llvm/lib/ObjectYAML/ELFYAML.cpp b/llvm/lib/ObjectYAML/ELFYAML.cpp
index 7fcabb68..c27339d 100644
--- a/llvm/lib/ObjectYAML/ELFYAML.cpp
+++ b/llvm/lib/ObjectYAML/ELFYAML.cpp
@@ -1160,7 +1160,7 @@ void MappingTraits<ELFYAML::FileHeader>::mapping(IO &IO,
IO.mapOptional("ABIVersion", FileHdr.ABIVersion, Hex8(0));
IO.mapRequired("Type", FileHdr.Type);
IO.mapOptional("Machine", FileHdr.Machine);
- IO.mapOptional("Flags", FileHdr.Flags, ELFYAML::ELF_EF(0));
+ IO.mapOptional("Flags", FileHdr.Flags);
IO.mapOptional("Entry", FileHdr.Entry, Hex64(0));
IO.mapOptional("SectionHeaderStringTable", FileHdr.SectionHeaderStringTable);
diff --git a/llvm/lib/ProfileData/MemProfReader.cpp b/llvm/lib/ProfileData/MemProfReader.cpp
index 235b134..3fc0dbf 100644
--- a/llvm/lib/ProfileData/MemProfReader.cpp
+++ b/llvm/lib/ProfileData/MemProfReader.cpp
@@ -135,7 +135,7 @@ readMemInfoBlocksV3(const char *Ptr) {
}
llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
-readMemInfoBlocksV4(const char *Ptr) {
+readMemInfoBlocksCommon(const char *Ptr, bool IsHistogramEncoded = false) {
using namespace support;
const uint64_t NumItemsToRead =
@@ -145,27 +145,74 @@ readMemInfoBlocksV4(const char *Ptr) {
for (uint64_t I = 0; I < NumItemsToRead; I++) {
const uint64_t Id =
endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
- // We cheat a bit here and remove the const from cast to set the
- // Histogram Pointer to newly allocated buffer.
- MemInfoBlock MIB = *reinterpret_cast<const MemInfoBlock *>(Ptr);
- // Only increment by size of MIB since readNext implicitly increments.
- Ptr += sizeof(MemInfoBlock);
+ MemInfoBlock MIB;
+#define READ_MIB_FIELD(FIELD) \
+ MIB.FIELD = endian::readNext<decltype(MIB.FIELD), llvm::endianness::little, \
+ unaligned>(Ptr)
+
+ READ_MIB_FIELD(AllocCount);
+ READ_MIB_FIELD(TotalAccessCount);
+ READ_MIB_FIELD(MinAccessCount);
+ READ_MIB_FIELD(MaxAccessCount);
+ READ_MIB_FIELD(TotalSize);
+ READ_MIB_FIELD(MinSize);
+ READ_MIB_FIELD(MaxSize);
+ READ_MIB_FIELD(AllocTimestamp);
+ READ_MIB_FIELD(DeallocTimestamp);
+ READ_MIB_FIELD(TotalLifetime);
+ READ_MIB_FIELD(MinLifetime);
+ READ_MIB_FIELD(MaxLifetime);
+ READ_MIB_FIELD(AllocCpuId);
+ READ_MIB_FIELD(DeallocCpuId);
+ READ_MIB_FIELD(NumMigratedCpu);
+ READ_MIB_FIELD(NumLifetimeOverlaps);
+ READ_MIB_FIELD(NumSameAllocCpu);
+ READ_MIB_FIELD(NumSameDeallocCpu);
+ READ_MIB_FIELD(DataTypeId);
+ READ_MIB_FIELD(TotalAccessDensity);
+ READ_MIB_FIELD(MinAccessDensity);
+ READ_MIB_FIELD(MaxAccessDensity);
+ READ_MIB_FIELD(TotalLifetimeAccessDensity);
+ READ_MIB_FIELD(MinLifetimeAccessDensity);
+ READ_MIB_FIELD(MaxLifetimeAccessDensity);
+ READ_MIB_FIELD(AccessHistogramSize);
+ READ_MIB_FIELD(AccessHistogram);
+#undef READ_MIB_FIELD
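+ // The field-by-field reads above (rather than casting Ptr to a
+ // MemInfoBlock) keep the reader independent of host struct padding and
+ // byte order.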
if (MIB.AccessHistogramSize > 0) {
+ // The in-memory representation uses uint64_t for histogram entries.
MIB.AccessHistogram =
(uintptr_t)malloc(MIB.AccessHistogramSize * sizeof(uint64_t));
- }
-
- for (uint64_t J = 0; J < MIB.AccessHistogramSize; J++) {
- ((uint64_t *)MIB.AccessHistogram)[J] =
- endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Ptr);
+ for (uint64_t J = 0; J < MIB.AccessHistogramSize; J++) {
+ if (!IsHistogramEncoded) {
+ ((uint64_t *)MIB.AccessHistogram)[J] =
+ endian::readNext<uint64_t, llvm::endianness::little, unaligned>(
+ Ptr);
+ } else {
+ // The encoded on-disk format (V5 onwards) uses uint16_t.
+ const uint16_t Val =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(
+ Ptr);
+ ((uint64_t *)MIB.AccessHistogram)[J] = decodeHistogramCount(Val);
+ }
+ }
}
Items.push_back({Id, MIB});
}
return Items;
}
+llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
+readMemInfoBlocksV4(const char *Ptr) {
+ return readMemInfoBlocksCommon(Ptr);
+}
+
+llvm::SmallVector<std::pair<uint64_t, MemInfoBlock>>
+readMemInfoBlocksV5(const char *Ptr) {
+ return readMemInfoBlocksCommon(Ptr, /*IsHistogramEncoded=*/true);
+}
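+
+// An illustrative sketch (not part of this patch): from V5 on, each
+// histogram entry is stored on disk as a compressed uint16_t, so a round
+// trip through the codec used above would look like this, assuming the
+// matching encodeHistogramCount() on the writer side:
+//
+//   uint64_t Count = 100000;
+//   uint16_t OnDisk = encodeHistogramCount(Count);  // lossy compression
+//   uint64_t Approx = decodeHistogramCount(OnDisk); // roughly Count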
+
CallStackMap readStackInfo(const char *Ptr) {
using namespace support;
@@ -658,6 +705,8 @@ RawMemProfReader::readMemInfoBlocks(const char *Ptr) {
return readMemInfoBlocksV3(Ptr);
if (MemprofRawVersion == 4ULL)
return readMemInfoBlocksV4(Ptr);
+ if (MemprofRawVersion == 5ULL)
+ return readMemInfoBlocksV5(Ptr);
llvm_unreachable(
"Panic: Unsupported version number when reading MemInfoBlocks");
}
diff --git a/llvm/lib/Remarks/RemarkLinker.cpp b/llvm/lib/Remarks/RemarkLinker.cpp
index 0ca6217..b00419b 100644
--- a/llvm/lib/Remarks/RemarkLinker.cpp
+++ b/llvm/lib/Remarks/RemarkLinker.cpp
@@ -70,8 +70,8 @@ Error RemarkLinker::link(StringRef Buffer, Format RemarkFormat) {
Expected<std::unique_ptr<RemarkParser>> MaybeParser =
createRemarkParserFromMeta(
RemarkFormat, Buffer,
- PrependPath ? std::optional<StringRef>(StringRef(*PrependPath))
- : std::optional<StringRef>());
+ PrependPath ? std::make_optional<StringRef>(*PrependPath)
+ : std::nullopt);
if (!MaybeParser)
return MaybeParser.takeError();
diff --git a/llvm/lib/Support/BLAKE3/CMakeLists.txt b/llvm/lib/Support/BLAKE3/CMakeLists.txt
index eae2b02..90311ae 100644
--- a/llvm/lib/Support/BLAKE3/CMakeLists.txt
+++ b/llvm/lib/Support/BLAKE3/CMakeLists.txt
@@ -26,7 +26,8 @@ endmacro()
if (CAN_USE_ASSEMBLER)
if (MSVC)
check_symbol_exists(_M_X64 "" IS_X64)
- if (IS_X64)
+ check_symbol_exists(_M_ARM64EC "" IS_ARM64EC)
+ if (IS_X64 AND NOT IS_ARM64EC)
enable_language(ASM_MASM)
set(LLVM_BLAKE3_ASM_FILES
blake3_sse2_x86-64_windows_msvc.asm
diff --git a/llvm/lib/Support/FileCollector.cpp b/llvm/lib/Support/FileCollector.cpp
index 29436f8..edb5313 100644
--- a/llvm/lib/Support/FileCollector.cpp
+++ b/llvm/lib/Support/FileCollector.cpp
@@ -313,5 +313,6 @@ private:
IntrusiveRefCntPtr<vfs::FileSystem>
FileCollector::createCollectorVFS(IntrusiveRefCntPtr<vfs::FileSystem> BaseFS,
std::shared_ptr<FileCollector> Collector) {
- return new FileCollectorFileSystem(std::move(BaseFS), std::move(Collector));
+ return makeIntrusiveRefCnt<FileCollectorFileSystem>(std::move(BaseFS),
+ std::move(Collector));
}
diff --git a/llvm/lib/Support/VirtualFileSystem.cpp b/llvm/lib/Support/VirtualFileSystem.cpp
index e489282..5d42488 100644
--- a/llvm/lib/Support/VirtualFileSystem.cpp
+++ b/llvm/lib/Support/VirtualFileSystem.cpp
@@ -397,7 +397,8 @@ void RealFileSystem::printImpl(raw_ostream &OS, PrintType Type,
}
IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
- static IntrusiveRefCntPtr<FileSystem> FS(new RealFileSystem(true));
+ static IntrusiveRefCntPtr<FileSystem> FS =
+ makeIntrusiveRefCnt<RealFileSystem>(true);
return FS;
}
@@ -2217,9 +2218,9 @@ RedirectingFileSystem::create(std::unique_ptr<MemoryBuffer> Buffer,
std::unique_ptr<RedirectingFileSystem> RedirectingFileSystem::create(
ArrayRef<std::pair<std::string, std::string>> RemappedFiles,
- bool UseExternalNames, FileSystem &ExternalFS) {
+ bool UseExternalNames, llvm::IntrusiveRefCntPtr<FileSystem> ExternalFS) {
std::unique_ptr<RedirectingFileSystem> FS(
- new RedirectingFileSystem(&ExternalFS));
+ new RedirectingFileSystem(ExternalFS));
FS->UseExternalNames = UseExternalNames;
StringMap<RedirectingFileSystem::Entry *> Entries;
@@ -2228,7 +2229,7 @@ std::unique_ptr<RedirectingFileSystem> RedirectingFileSystem::create(
SmallString<128> From = StringRef(Mapping.first);
SmallString<128> To = StringRef(Mapping.second);
{
- auto EC = ExternalFS.makeAbsolute(From);
+ auto EC = ExternalFS->makeAbsolute(From);
(void)EC;
assert(!EC && "Could not make absolute path");
}
@@ -2250,7 +2251,7 @@ std::unique_ptr<RedirectingFileSystem> RedirectingFileSystem::create(
}
assert(Parent && "File without a directory?");
{
- auto EC = ExternalFS.makeAbsolute(To);
+ auto EC = ExternalFS->makeAbsolute(To);
(void)EC;
assert(!EC && "Could not make absolute path");
}
diff --git a/llvm/lib/Support/Windows/Threading.inc b/llvm/lib/Support/Windows/Threading.inc
index 8dd7c88..b11f216 100644
--- a/llvm/lib/Support/Windows/Threading.inc
+++ b/llvm/lib/Support/Windows/Threading.inc
@@ -136,6 +136,7 @@ HMODULE loadSystemModuleSecure(LPCWSTR lpModuleName) {
} // namespace llvm::sys::windows
SetThreadPriorityResult llvm::set_thread_priority(ThreadPriority Priority) {
+#ifdef THREAD_POWER_THROTTLING_CURRENT_VERSION
HMODULE kernelM = llvm::sys::windows::loadSystemModuleSecure(L"kernel32.dll");
if (kernelM) {
// SetThreadInformation is only available on Windows 8 and later. Since we
@@ -166,6 +167,7 @@ SetThreadPriorityResult llvm::set_thread_priority(ThreadPriority Priority) {
: 0);
}
}
+#endif
// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-setthreadpriority
// Begin background processing mode. The system lowers the resource scheduling
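THREAD_POWER_THROTTLING_CURRENT_VERSION is only defined by newer Windows SDK headers, so the guard keeps the file compiling against older SDKs. The same pattern in miniature (the function bodies are illustrative):

    #include <cstdio>

    void configureThreadPower() {
    #ifdef THREAD_POWER_THROTTLING_CURRENT_VERSION
      // Newer SDK: the throttling types exist, so the real work happens here.
      std::puts("power-throttling path");
    #else
      // Older SDK: the macro is absent and this branch keeps the build green.
      std::puts("fallback path");
    #endif
    }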
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index ca09598..99f0af5 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -39,8 +39,8 @@ let Predicates = [HasDotProd] in {
def ext_addv_to_udot_addv : GICombineRule<
(defs root:$root, ext_addv_to_udot_addv_matchinfo:$matchinfo),
(match (wip_match_opcode G_VECREDUCE_ADD):$root,
- [{ return matchExtAddvToUdotAddv(*${root}, MRI, STI, ${matchinfo}); }]),
- (apply [{ applyExtAddvToUdotAddv(*${root}, MRI, B, Observer, STI, ${matchinfo}); }])
+ [{ return matchExtAddvToDotAddv(*${root}, MRI, STI, ${matchinfo}); }]),
+ (apply [{ applyExtAddvToDotAddv(*${root}, MRI, B, Observer, STI, ${matchinfo}); }])
>;
}
@@ -62,8 +62,10 @@ class push_opcode_through_ext<Instruction opcode, Instruction extOpcode> : GICom
def push_sub_through_zext : push_opcode_through_ext<G_SUB, G_ZEXT>;
def push_add_through_zext : push_opcode_through_ext<G_ADD, G_ZEXT>;
+def push_mul_through_zext : push_opcode_through_ext<G_MUL, G_ZEXT>;
def push_sub_through_sext : push_opcode_through_ext<G_SUB, G_SEXT>;
def push_add_through_sext : push_opcode_through_ext<G_ADD, G_SEXT>;
+def push_mul_through_sext : push_opcode_through_ext<G_MUL, G_SEXT>;
def AArch64PreLegalizerCombiner: GICombiner<
"AArch64PreLegalizerCombinerImpl", [all_combines,
@@ -75,8 +77,10 @@ def AArch64PreLegalizerCombiner: GICombiner<
ext_uaddv_to_uaddlv,
push_sub_through_zext,
push_add_through_zext,
+ push_mul_through_zext,
push_sub_through_sext,
- push_add_through_sext]> {
+ push_add_through_sext,
+ push_mul_through_sext]> {
let CombineAllMethodName = "tryCombineAllImpl";
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4f9471c..4f6e3dd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -8952,6 +8952,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
bool &IsTailCall = CLI.IsTailCall;
CallingConv::ID &CallConv = CLI.CallConv;
bool IsVarArg = CLI.IsVarArg;
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
MachineFunction::CallSiteInfo CSInfo;
@@ -8991,6 +8992,10 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
*DAG.getContext());
RetCCInfo.AnalyzeCallResult(Ins, RetCC);
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
+
// Check callee args/returns for SVE registers and set calling convention
// accordingly.
if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
@@ -24174,13 +24179,6 @@ static SDValue combineStoreValueFPToInt(StoreSDNode *ST,
SDValue VecFP = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, FPSrc);
SDValue VecConv = DAG.getNode(Value.getOpcode(), DL, VecDstVT, VecFP);
- if (ST->isTruncatingStore()) {
- EVT NewVecDstVT = EVT::getVectorVT(
- *DAG.getContext(), ST->getMemoryVT(),
- VecDstVT.getFixedSizeInBits() / ST->getMemoryVT().getFixedSizeInBits());
- VecConv = DAG.getNode(AArch64ISD::NVCAST, DL, NewVecDstVT, VecConv);
- }
-
SDValue Zero = DAG.getVectorIdxConstant(0, DL);
SDValue Extracted =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VecConv, Zero);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 40f49da..e1adc0b 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -270,6 +270,13 @@ bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
const Function *Callee) const {
SMECallAttrs CallAttrs(*Caller, *Callee);
+ // Never inline a function explicitly marked as streaming into a
+ // non-streaming function. Assume it was marked as streaming
+ // for a reason.
+ if (CallAttrs.caller().hasNonStreamingInterfaceAndBody() &&
+ CallAttrs.callee().hasStreamingInterfaceOrBody())
+ return false;
+
// When inlining, we should consider the body of the function, not the
// interface.
if (CallAttrs.callee().hasStreamingBody()) {
@@ -4905,14 +4912,17 @@ void AArch64TTIImpl::getUnrollingPreferences(
// Disable partial & runtime unrolling on -Os.
UP.PartialOptSizeThreshold = 0;
- // No need to unroll auto-vectorized loops
- if (findStringMetadataForLoop(L, "llvm.loop.isvectorized"))
- return;
-
// Scan the loop: don't unroll loops with calls as this could prevent
- // inlining.
+ // inlining. Don't unroll auto-vectorized loops either, though do allow
+ // unrolling of the scalar remainder.
+ bool IsVectorized = getBooleanLoopAttribute(L, "llvm.loop.isvectorized");
for (auto *BB : L->getBlocks()) {
for (auto &I : *BB) {
+ // Both the auto-vectorized loop and its scalar remainder carry the
+ // isvectorized attribute, so differentiate between them by the presence
+ // of vector instructions.
+ if (IsVectorized && I.getType()->isVectorTy())
+ return;
if (isa<CallBase>(I)) {
if (isa<CallInst>(I) || isa<InvokeInst>(I))
if (const Function *F = cast<CallBase>(I).getCalledFunction())
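For intuition: after loop vectorization, both the vector body and the scalar remainder of a loop like the illustrative sketch below carry llvm.loop.isvectorized, and only the body contains vector-typed instructions, which is what the scan above keys on:

    // The vectorizer turns this into a vector body (vector-typed
    // instructions, still exempt from unrolling) plus a scalar remainder
    // that the change above now lets the unroller handle.
    void saxpy(float *X, const float *Y, float A, int N) {
      for (int I = 0; I < N; ++I)
        X[I] += A * Y[I];
    }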
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
index 0b79850..1a15075 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -50,8 +50,10 @@ bool AArch64GISelUtils::isCMN(const MachineInstr *MaybeSub,
//
// %sub = G_SUB 0, %y
// %cmp = G_ICMP eq/ne, %z, %sub
+ // or with signed comparisons with the no-signed-wrap flag set
if (!MaybeSub || MaybeSub->getOpcode() != TargetOpcode::G_SUB ||
- !CmpInst::isEquality(Pred))
+ (!CmpInst::isEquality(Pred) &&
+ !(CmpInst::isSigned(Pred) && MaybeSub->getFlag(MachineInstr::NoSWrap))))
return false;
auto MaybeZero =
getIConstantVRegValWithLookThrough(MaybeSub->getOperand(1).getReg(), MRI);
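The NoSWrap requirement exists because rewriting cmp z, (0 - y) as cmn z, y is only sound for signed orderings when the negation cannot wrap: equality survives modular wrap-around, signed orderings do not. A small standalone demonstration (values chosen to hit the wrap):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t Z = 5, Y = INT32_MIN;
      // Two's-complement negation of INT32_MIN wraps back to INT32_MIN.
      int32_t NegY = (int32_t)(0u - (uint32_t)Y);
      // Equality agrees with the mathematical result despite the wrap.
      std::printf("eq:  %d\n", Z == NegY);
      // Mathematically 5 < 2^31 is true, but the wrapped comparison below
      // is false -- hence the NoSWrap check before using signed predicates.
      std::printf("slt: %d\n", Z < NegY);
    }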
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 1381a9b..d905692 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -1810,7 +1810,7 @@ bool AArch64InstructionSelector::selectCompareBranchFedByICmp(
// Couldn't optimize. Emit a compare + a Bcc.
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
- auto PredOp = ICmp.getOperand(1);
+ auto &PredOp = ICmp.getOperand(1);
emitIntegerCompare(ICmp.getOperand(2), ICmp.getOperand(3), PredOp, MIB);
const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(
static_cast<CmpInst::Predicate>(PredOp.getPredicate()));
@@ -2506,12 +2506,12 @@ bool AArch64InstructionSelector::earlySelect(MachineInstr &I) {
return false;
}
auto &PredOp = Cmp->getOperand(1);
- auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
- const AArch64CC::CondCode InvCC =
- changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
MIB.setInstrAndDebugLoc(I);
emitIntegerCompare(/*LHS=*/Cmp->getOperand(2),
/*RHS=*/Cmp->getOperand(3), PredOp, MIB);
+ auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
+ const AArch64CC::CondCode InvCC =
+ changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
emitCSINC(/*Dst=*/AddDst, /*Src =*/AddLHS, /*Src2=*/AddLHS, InvCC, MIB);
I.eraseFromParent();
return true;
@@ -3574,10 +3574,11 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return false;
}
- auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
+ auto &PredOp = I.getOperand(1);
+ emitIntegerCompare(I.getOperand(2), I.getOperand(3), PredOp, MIB);
+ auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
const AArch64CC::CondCode InvCC =
changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
- emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1), MIB);
emitCSINC(/*Dst=*/I.getOperand(0).getReg(), /*Src1=*/AArch64::WZR,
/*Src2=*/AArch64::WZR, InvCC, MIB);
I.eraseFromParent();
@@ -5096,11 +5097,11 @@ bool AArch64InstructionSelector::tryOptSelect(GSelect &I) {
AArch64CC::CondCode CondCode;
if (CondOpc == TargetOpcode::G_ICMP) {
- auto Pred =
- static_cast<CmpInst::Predicate>(CondDef->getOperand(1).getPredicate());
+ auto &PredOp = CondDef->getOperand(1);
+ emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3), PredOp,
+ MIB);
+ auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
CondCode = changeICMPPredToAArch64CC(Pred);
- emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3),
- CondDef->getOperand(1), MIB);
} else {
// Get the condition code for the select.
auto Pred =
@@ -5148,29 +5149,37 @@ MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare(
MachineInstr *LHSDef = getDefIgnoringCopies(LHS.getReg(), MRI);
MachineInstr *RHSDef = getDefIgnoringCopies(RHS.getReg(), MRI);
auto P = static_cast<CmpInst::Predicate>(Predicate.getPredicate());
+
// Given this:
//
// x = G_SUB 0, y
- // G_ICMP x, z
+ // G_ICMP z, x
//
// Produce this:
//
- // cmn y, z
- if (isCMN(LHSDef, P, MRI))
- return emitCMN(LHSDef->getOperand(2), RHS, MIRBuilder);
+ // cmn z, y
+ if (isCMN(RHSDef, P, MRI))
+ return emitCMN(LHS, RHSDef->getOperand(2), MIRBuilder);
- // Same idea here, but with the RHS of the compare instead:
+ // Same idea here, but with the LHS of the compare instead:
//
// Given this:
//
// x = G_SUB 0, y
- // G_ICMP z, x
+ // G_ICMP x, z
//
// Produce this:
//
- // cmn z, y
- if (isCMN(RHSDef, P, MRI))
- return emitCMN(LHS, RHSDef->getOperand(2), MIRBuilder);
+ // cmn y, z
+ //
+ // But be careful! We need to swap the predicate!
+ if (isCMN(LHSDef, P, MRI)) {
+ if (!CmpInst::isEquality(P)) {
+ P = CmpInst::getSwappedPredicate(P);
+ Predicate = MachineOperand::CreatePredicate(P);
+ }
+ return emitCMN(LHSDef->getOperand(2), RHS, MIRBuilder);
+ }
// Given this:
//
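Swapping the operands of a comparison forces a swap of any non-equality predicate, which is what the new CreatePredicate call records. The underlying rule in isolation (plain C++, names illustrative):

    #include <cassert>

    enum Pred { LT, GT, EQ };

    // Analogue of CmpInst::getSwappedPredicate for this tiny enum: the
    // predicate P' such that (b P' a) is equivalent to (a P b).
    constexpr Pred swapped(Pred P) { return P == LT ? GT : P == GT ? LT : EQ; }

    int main() {
      int A = 1, B = 2;
      assert((A < B) == (B > A));   // ordering predicates must swap
      assert((A == B) == (B == A)); // equality is symmetric, no swap needed
      (void)swapped(LT);
    }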
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
index 1cd9453..8c10673 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PreLegalizerCombiner.cpp
@@ -228,12 +228,13 @@ void applyFoldGlobalOffset(MachineInstr &MI, MachineRegisterInfo &MRI,
B.buildConstant(LLT::scalar(64), -static_cast<int64_t>(MinOffset)));
}
-// Combines vecreduce_add(mul(ext(x), ext(y))) -> vecreduce_add(udot(x, y))
-// Or vecreduce_add(ext(x)) -> vecreduce_add(udot(x, 1))
+// Combines vecreduce_add(mul(ext(x), ext(y))) -> vecreduce_add([us]dot(x, y))
+// Or vecreduce_add(ext(mul(ext(x), ext(y)))) -> vecreduce_add([us]dot(x, y))
+// Or vecreduce_add(ext(x)) -> vecreduce_add([us]dot(x, 1))
// Similar to performVecReduceAddCombine in SelectionDAG
-bool matchExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
- const AArch64Subtarget &STI,
- std::tuple<Register, Register, bool> &MatchInfo) {
+bool matchExtAddvToDotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
+ const AArch64Subtarget &STI,
+ std::tuple<Register, Register, bool> &MatchInfo) {
assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
"Expected a G_VECREDUCE_ADD instruction");
assert(STI.hasDotProd() && "Target should have Dot Product feature");
@@ -246,31 +247,57 @@ bool matchExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
if (DstTy.getScalarSizeInBits() != 32 || MidTy.getScalarSizeInBits() != 32)
return false;
- LLT SrcTy;
- auto I1Opc = I1->getOpcode();
- if (I1Opc == TargetOpcode::G_MUL) {
+ // Detect mul(ext, ext) with symmetric extends. If I1Opc is G_ZEXT or G_SEXT
+ // then both extends must have that same opcode; I1Opc is set to the ext
+ // opcode on output.
+ auto tryMatchingMulOfExt = [&MRI](MachineInstr *MI, Register &Out1,
+ Register &Out2, unsigned &I1Opc) {
// If result of this has more than 1 use, then there is no point in creating
- // udot instruction
- if (!MRI.hasOneNonDBGUse(MidReg))
+ // a dot instruction
+ if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
return false;
MachineInstr *ExtMI1 =
- getDefIgnoringCopies(I1->getOperand(1).getReg(), MRI);
+ getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI);
MachineInstr *ExtMI2 =
- getDefIgnoringCopies(I1->getOperand(2).getReg(), MRI);
+ getDefIgnoringCopies(MI->getOperand(2).getReg(), MRI);
LLT Ext1DstTy = MRI.getType(ExtMI1->getOperand(0).getReg());
LLT Ext2DstTy = MRI.getType(ExtMI2->getOperand(0).getReg());
if (ExtMI1->getOpcode() != ExtMI2->getOpcode() || Ext1DstTy != Ext2DstTy)
return false;
+ if ((I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) &&
+ I1Opc != ExtMI1->getOpcode())
+ return false;
+ Out1 = ExtMI1->getOperand(1).getReg();
+ Out2 = ExtMI2->getOperand(1).getReg();
I1Opc = ExtMI1->getOpcode();
- SrcTy = MRI.getType(ExtMI1->getOperand(1).getReg());
- std::get<0>(MatchInfo) = ExtMI1->getOperand(1).getReg();
- std::get<1>(MatchInfo) = ExtMI2->getOperand(1).getReg();
+ return true;
+ };
+
+ LLT SrcTy;
+ unsigned I1Opc = I1->getOpcode();
+ if (I1Opc == TargetOpcode::G_MUL) {
+ Register Out1, Out2;
+ if (!tryMatchingMulOfExt(I1, Out1, Out2, I1Opc))
+ return false;
+ SrcTy = MRI.getType(Out1);
+ std::get<0>(MatchInfo) = Out1;
+ std::get<1>(MatchInfo) = Out2;
} else if (I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) {
- SrcTy = MRI.getType(I1->getOperand(1).getReg());
- std::get<0>(MatchInfo) = I1->getOperand(1).getReg();
- std::get<1>(MatchInfo) = 0;
+ Register I1Op = I1->getOperand(1).getReg();
+ MachineInstr *M = getDefIgnoringCopies(I1Op, MRI);
+ Register Out1, Out2;
+ if (M->getOpcode() == TargetOpcode::G_MUL &&
+ tryMatchingMulOfExt(M, Out1, Out2, I1Opc)) {
+ SrcTy = MRI.getType(Out1);
+ std::get<0>(MatchInfo) = Out1;
+ std::get<1>(MatchInfo) = Out2;
+ } else {
+ SrcTy = MRI.getType(I1Op);
+ std::get<0>(MatchInfo) = I1Op;
+ std::get<1>(MatchInfo) = 0;
+ }
} else {
return false;
}
@@ -288,11 +315,11 @@ bool matchExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
return true;
}
-void applyExtAddvToUdotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
- MachineIRBuilder &Builder,
- GISelChangeObserver &Observer,
- const AArch64Subtarget &STI,
- std::tuple<Register, Register, bool> &MatchInfo) {
+void applyExtAddvToDotAddv(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &Builder,
+ GISelChangeObserver &Observer,
+ const AArch64Subtarget &STI,
+ std::tuple<Register, Register, bool> &MatchInfo) {
assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
"Expected a G_VECREDUCE_ADD instruction");
assert(STI.hasDotProd() && "Target should have Dot Product feature");
@@ -553,15 +580,15 @@ void applyExtUaddvToUaddlv(MachineInstr &MI, MachineRegisterInfo &MRI,
MI.eraseFromParent();
}
-// Pushes ADD/SUB through extend instructions to decrease the number of extend
-// instruction at the end by allowing selection of {s|u}addl sooner
-
+// Pushes ADD/SUB/MUL through extend instructions to decrease the number of
+// extend instructions at the end by allowing selection of {s|u}addl sooner
// i32 add(i32 ext i8, i32 ext i8) => i32 ext(i16 add(i16 ext i8, i16 ext i8))
bool matchPushAddSubExt(MachineInstr &MI, MachineRegisterInfo &MRI,
Register DstReg, Register SrcReg1, Register SrcReg2) {
assert((MI.getOpcode() == TargetOpcode::G_ADD ||
- MI.getOpcode() == TargetOpcode::G_SUB) &&
- "Expected a G_ADD or G_SUB instruction\n");
+ MI.getOpcode() == TargetOpcode::G_SUB ||
+ MI.getOpcode() == TargetOpcode::G_MUL) &&
+ "Expected a G_ADD, G_SUB or G_MUL instruction\n");
// Deal with vector types only
LLT DstTy = MRI.getType(DstReg);
@@ -594,9 +621,10 @@ void applyPushAddSubExt(MachineInstr &MI, MachineRegisterInfo &MRI,
B.buildInstr(MI.getOpcode(), {MidTy}, {Ext1Reg, Ext2Reg}).getReg(0);
// G_SUB has to sign-extend the result.
- // G_ADD needs to sext from sext and can sext or zext from zext, so the
- // original opcode is used.
- if (MI.getOpcode() == TargetOpcode::G_ADD)
+ // G_ADD needs to sext from sext and can sext or zext from zext, and G_MUL
+ // must preserve the original extend, so the original opcode is used for both.
+ if (MI.getOpcode() == TargetOpcode::G_ADD ||
+ MI.getOpcode() == TargetOpcode::G_MUL)
B.buildInstr(Opc, {DstReg}, {AddReg});
else
B.buildSExt(DstReg, AddReg);
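The widened matcher covers the classic byte dot-product idiom. A source-level sketch of code whose reduction lowers to vecreduce_add(mul(ext, ext)) and, with the dot-product feature, can now select udot (the function is illustrative):

    #include <cstdint>

    // Each i8 product is widened to i32 before the accumulating add; the
    // whole reduction is the pattern the combine rewrites to a dot product.
    int dot(const uint8_t *A, const uint8_t *B, int N) {
      int Sum = 0;
      for (int I = 0; I < N; ++I)
        Sum += A[I] * B[I];
      return Sum;
    }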
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index b9d3e1b..6912caf 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -461,7 +461,7 @@ void AArch64AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
Value <<= Info.TargetOffset;
unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// Used to point to big endian bytes.
unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 071c940..8a0c4ac 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -2576,6 +2576,10 @@ def HasFmaakFmamkF64Insts :
Predicate<"Subtarget->hasFmaakFmamkF64Insts()">,
AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
+def HasAddMinMaxInsts :
+ Predicate<"Subtarget->hasAddMinMaxInsts()">,
+ AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
+
def HasPkAddMinMaxInsts :
Predicate<"Subtarget->hasPkAddMinMaxInsts()">,
AssemblerPredicate<(any_of FeatureGFX1250Insts)>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 5f19837..a9278c1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -89,10 +89,6 @@ static cl::opt<bool> DisableFDivExpand(
cl::ReallyHidden,
cl::init(false));
-static bool hasUnsafeFPMath(const Function &F) {
- return F.getFnAttribute("unsafe-fp-math").getValueAsBool();
-}
-
class AMDGPUCodeGenPrepareImpl
: public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
public:
@@ -104,7 +100,6 @@ public:
const DominatorTree *DT;
const UniformityInfo &UA;
const DataLayout &DL;
- const bool HasUnsafeFPMath;
const bool HasFP32DenormalFlush;
bool FlowChanged = false;
mutable Function *SqrtF32 = nullptr;
@@ -117,7 +112,6 @@ public:
const DominatorTree *DT, const UniformityInfo &UA)
: F(F), ST(TM.getSubtarget<GCNSubtarget>(F)), TM(TM), TLI(TLI), AC(AC),
DT(DT), UA(UA), DL(F.getDataLayout()),
- HasUnsafeFPMath(hasUnsafeFPMath(F)),
HasFP32DenormalFlush(SIModeRegisterDefaults(F, ST).FP32Denormals ==
DenormalMode::getPreserveSign()) {}
@@ -637,8 +631,7 @@ bool AMDGPUCodeGenPrepareImpl::canOptimizeWithRsq(const FPMathOperator *SqrtOp,
return false;
// v_rsq_f32 gives 1ulp
- return SqrtFMF.approxFunc() || HasUnsafeFPMath ||
- SqrtOp->getFPAccuracy() >= 1.0f;
+ return SqrtFMF.approxFunc() || SqrtOp->getFPAccuracy() >= 1.0f;
}
Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
@@ -664,7 +657,7 @@ Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
IRBuilder<>::FastMathFlagGuard Guard(Builder);
Builder.setFastMathFlags(DivFMF | SqrtFMF);
- if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) || HasUnsafeFPMath ||
+ if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) ||
canIgnoreDenormalInput(Den, CtxI)) {
Value *Result = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, Den);
// -1.0 / sqrt(x) -> fneg(rsq(x))
@@ -680,7 +673,7 @@ Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
-// allowed with unsafe-fp-math or afn.
+// allowed with afn.
//
// a/b -> a*rcp(b) when arcp is allowed, and we only need provide ULP 1.0
Value *
@@ -803,9 +796,9 @@ Value *AMDGPUCodeGenPrepareImpl::visitFDivElement(
//
// With rcp:
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
-// allowed with unsafe-fp-math or afn.
+// allowed with afn.
//
-// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
+// a/b -> a*rcp(b) when inaccurate rcp is allowed with afn.
//
// With fdiv.fast:
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
@@ -843,7 +836,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
RsqOp = SqrtOp->getOperand(0);
}
- // Inaccurate rcp is allowed with unsafe-fp-math or afn.
+ // Inaccurate rcp is allowed with afn.
//
// Defer to codegen to handle this.
//
@@ -852,7 +845,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
// expansion of afn to codegen. The current interpretation is so aggressive we
// don't need any pre-consideration here when we have better information. A
// more conservative interpretation could use handling here.
- const bool AllowInaccurateRcp = HasUnsafeFPMath || DivFMF.approxFunc();
+ const bool AllowInaccurateRcp = DivFMF.approxFunc();
if (!RsqOp && AllowInaccurateRcp)
return false;
@@ -2026,7 +2019,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
// We're trying to handle the fast-but-not-that-fast case only. The lowering
// of fast llvm.sqrt will give the raw instruction anyway.
- if (SqrtFMF.approxFunc() || HasUnsafeFPMath)
+ if (SqrtFMF.approxFunc())
return false;
const float ReqdAccuracy = FPOp->getFPAccuracy();
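With the global unsafe-fp-math escape hatch gone, these paths key entirely on the afn flag carried by each instruction. A sketch of how that flag is attached when building IR, assuming the usual IRBuilder setup (function and value names are illustrative):

    #include "llvm/IR/IRBuilder.h"

    // Builds `fdiv afn float %A, %B`: the afn ("approximate functions")
    // flag travels on the instruction instead of a target-wide option.
    llvm::Value *buildApproxDiv(llvm::IRBuilder<> &Builder, llvm::Value *A,
                                llvm::Value *B) {
      llvm::FastMathFlags FMF;
      FMF.setApproxFunc();
      Builder.setFastMathFlags(FMF);
      return Builder.CreateFDiv(A, B);
    }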
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index 2991778..19b8757 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -204,7 +204,7 @@ MetadataStreamerMsgPackV4::getWorkGroupDimensions(MDNode *Node) const {
for (auto &Op : Node->operands())
Dims.push_back(Dims.getDocument()->getNode(
- uint64_t(mdconst::extract<ConstantInt>(Op)->getZExtValue())));
+ mdconst::extract<ConstantInt>(Op)->getZExtValue()));
return Dims;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 6118933..31c4f62 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2634,7 +2634,7 @@ bool AMDGPUTargetLowering::allowApproxFunc(const SelectionDAG &DAG,
if (Flags.hasApproximateFuncs())
return true;
auto &Options = DAG.getTarget().Options;
- return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+ return Options.ApproxFuncFPMath;
}
bool AMDGPUTargetLowering::needsDenormHandlingF32(const SelectionDAG &DAG,
@@ -2757,7 +2757,7 @@ SDValue AMDGPUTargetLowering::LowerFLOGCommon(SDValue Op,
const auto &Options = getTargetMachine().Options;
if (VT == MVT::f16 || Flags.hasApproximateFuncs() ||
- Options.ApproxFuncFPMath || Options.UnsafeFPMath) {
+ Options.ApproxFuncFPMath) {
if (VT == MVT::f16 && !Subtarget->has16BitInsts()) {
// Log and multiply in f32 is good enough for f16.
@@ -3585,7 +3585,7 @@ SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) con
if (N0.getValueType() == MVT::f32)
return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
- if (getTargetMachine().Options.UnsafeFPMath) {
+ if (Op->getFlags().hasApproximateFuncs()) {
// There is a generic expand for FP_TO_FP16 with unsafe fast math.
return SDValue();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index cd9c2ec..b0d3b12 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -6994,13 +6994,13 @@ void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_0_0(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
MIB.addImm(
- (MI.getOperand(OpIdx).getImm() & 0x2) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
+ (MI.getOperand(OpIdx).getImm() & 0x1) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
}
void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_0_1(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
- MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x2)
+ MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x1)
? (int64_t)(SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL)
: (int64_t)SISrcMods::DST_OP_SEL);
}
@@ -7009,13 +7009,13 @@ void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_1_0(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
MIB.addImm(
- (MI.getOperand(OpIdx).getImm() & 0x1) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
+ (MI.getOperand(OpIdx).getImm() & 0x2) ? (int64_t)SISrcMods::OP_SEL_0 : 0);
}
void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_1_1(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
- MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x1)
+ MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x2)
? (int64_t)(SISrcMods::OP_SEL_0)
: 0);
}
@@ -7044,8 +7044,9 @@ void AMDGPUInstructionSelector::renderSrcAndDstSelToOpSelXForm_2_0(
void AMDGPUInstructionSelector::renderDstSelToOpSel3XFormXForm(
MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
assert(OpIdx >= 0 && "expected to match an immediate operand");
- MIB.addImm(
- (MI.getOperand(OpIdx).getImm() & 0x2) ? (int64_t)SISrcMods::DST_OP_SEL : 0);
+ MIB.addImm((MI.getOperand(OpIdx).getImm() & 0x2)
+ ? (int64_t)SISrcMods::DST_OP_SEL
+ : 0);
}
void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 7a50923..511fc69 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -94,7 +94,6 @@ def NoFP32Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode()
def NoFP64Denormals : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().FP64FP16Denormals == DenormalMode::getPreserveSign()">;
def IEEEModeEnabled : Predicate<"MF->getInfo<SIMachineFunctionInfo>()->getMode().IEEE">;
def IEEEModeDisabled : Predicate<"!MF->getInfo<SIMachineFunctionInfo>()->getMode().IEEE">;
-def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
}
def FMA : Predicate<"Subtarget->hasFMA()">;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 50da8fd..1fdf272 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -3344,7 +3344,7 @@ static bool allowApproxFunc(const MachineFunction &MF, unsigned Flags) {
if (Flags & MachineInstr::FmAfn)
return true;
const auto &Options = MF.getTarget().Options;
- return Options.UnsafeFPMath || Options.ApproxFuncFPMath;
+ return Options.ApproxFuncFPMath;
}
static bool needsDenormHandlingF32(const MachineFunction &MF, Register Src,
@@ -3450,7 +3450,7 @@ bool AMDGPULegalizerInfo::legalizeFlogCommon(MachineInstr &MI,
static_cast<const AMDGPUTargetMachine &>(MF.getTarget());
if (Ty == F16 || MI.getFlag(MachineInstr::FmAfn) ||
- TM.Options.ApproxFuncFPMath || TM.Options.UnsafeFPMath) {
+ TM.Options.ApproxFuncFPMath) {
if (Ty == F16 && !ST.has16BitInsts()) {
Register LogVal = MRI.createGenericVirtualRegister(F32);
auto PromoteSrc = B.buildFPExt(F32, X);
@@ -4877,9 +4877,7 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV(MachineInstr &MI,
uint16_t Flags = MI.getFlags();
LLT ResTy = MRI.getType(Res);
- const MachineFunction &MF = B.getMF();
- bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn) ||
- MF.getTarget().Options.UnsafeFPMath;
+ bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn);
if (const auto *CLHS = getConstantFPVRegVal(LHS, MRI)) {
if (!AllowInaccurateRcp && ResTy != LLT::scalar(16))
@@ -4939,9 +4937,7 @@ bool AMDGPULegalizerInfo::legalizeFastUnsafeFDIV64(MachineInstr &MI,
uint16_t Flags = MI.getFlags();
LLT ResTy = MRI.getType(Res);
- const MachineFunction &MF = B.getMF();
- bool AllowInaccurateRcp = MF.getTarget().Options.UnsafeFPMath ||
- MI.getFlag(MachineInstr::FmAfn);
+ bool AllowInaccurateRcp = MI.getFlag(MachineInstr::FmAfn);
if (!AllowInaccurateRcp)
return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 8767208..aa75534 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -53,8 +53,6 @@ private:
using FuncInfo = llvm::AMDGPULibFunc;
- bool UnsafeFPMath = false;
-
// -fuse-native.
bool AllNative = false;
@@ -117,7 +115,6 @@ private:
bool AllowStrictFP = false);
protected:
- bool isUnsafeMath(const FPMathOperator *FPOp) const;
bool isUnsafeFiniteOnlyMath(const FPMathOperator *FPOp) const;
bool canIncreasePrecisionOfConstantFold(const FPMathOperator *FPOp) const;
@@ -415,23 +412,17 @@ bool AMDGPULibCalls::parseFunctionName(const StringRef &FMangledName,
return AMDGPULibFunc::parse(FMangledName, FInfo);
}
-bool AMDGPULibCalls::isUnsafeMath(const FPMathOperator *FPOp) const {
- return UnsafeFPMath || FPOp->isFast();
-}
-
bool AMDGPULibCalls::isUnsafeFiniteOnlyMath(const FPMathOperator *FPOp) const {
- return UnsafeFPMath ||
- (FPOp->hasApproxFunc() && FPOp->hasNoNaNs() && FPOp->hasNoInfs());
+ return FPOp->hasApproxFunc() && FPOp->hasNoNaNs() && FPOp->hasNoInfs();
}
bool AMDGPULibCalls::canIncreasePrecisionOfConstantFold(
const FPMathOperator *FPOp) const {
// TODO: Refine to approxFunc or contract
- return isUnsafeMath(FPOp);
+ return FPOp->isFast();
}
void AMDGPULibCalls::initFunction(Function &F, FunctionAnalysisManager &FAM) {
- UnsafeFPMath = F.getFnAttribute("unsafe-fp-math").getValueAsBool();
AC = &FAM.getResult<AssumptionAnalysis>(F);
TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 6c40eb5..c8e45d4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4574,8 +4574,23 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_cvt_pknorm_u16:
case Intrinsic::amdgcn_cvt_pk_i16:
case Intrinsic::amdgcn_cvt_pk_u16:
+ case Intrinsic::amdgcn_cvt_sr_pk_f16_f32:
+ case Intrinsic::amdgcn_cvt_sr_pk_bf16_f32:
case Intrinsic::amdgcn_cvt_pk_f16_fp8:
case Intrinsic::amdgcn_cvt_pk_f16_bf8:
+ case Intrinsic::amdgcn_cvt_pk_fp8_f16:
+ case Intrinsic::amdgcn_cvt_pk_bf8_f16:
+ case Intrinsic::amdgcn_cvt_sr_fp8_f16:
+ case Intrinsic::amdgcn_cvt_sr_bf8_f16:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f16_fp8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_bf16_fp8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f16_bf8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_bf16_bf8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f16_fp4:
+ case Intrinsic::amdgcn_cvt_scale_pk8_bf16_fp4:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f32_fp8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f32_bf8:
+ case Intrinsic::amdgcn_cvt_scale_pk8_f32_fp4:
case Intrinsic::amdgcn_sat_pk4_i4_i8:
case Intrinsic::amdgcn_sat_pk4_u4_u8:
case Intrinsic::amdgcn_fmed3:
@@ -4627,8 +4642,10 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_cvt_pk_f32_fp8:
case Intrinsic::amdgcn_cvt_pk_f32_bf8:
case Intrinsic::amdgcn_cvt_pk_fp8_f32:
+ case Intrinsic::amdgcn_cvt_pk_fp8_f32_e5m3:
case Intrinsic::amdgcn_cvt_pk_bf8_f32:
case Intrinsic::amdgcn_cvt_sr_fp8_f32:
+ case Intrinsic::amdgcn_cvt_sr_fp8_f32_e5m3:
case Intrinsic::amdgcn_cvt_sr_bf8_f32:
case Intrinsic::amdgcn_cvt_sr_bf16_f32:
case Intrinsic::amdgcn_cvt_sr_f16_f32:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 24f4df2..a0c99b0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -597,7 +597,6 @@ InstructionCost GCNTTIImpl::getArithmeticInstrCost(
// Estimate all types may be fused with contract/unsafe flags
const TargetOptions &Options = TLI->getTargetMachine().Options;
if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
- Options.UnsafeFPMath ||
(FAdd->hasAllowContract() && CxtI->hasAllowContract()))
return TargetTransformInfo::TCC_Free;
}
@@ -650,8 +649,7 @@ InstructionCost GCNTTIImpl::getArithmeticInstrCost(
return LT.first * Cost * NElts;
}
- if (SLT == MVT::f32 && ((CxtI && CxtI->hasApproxFunc()) ||
- TLI->getTargetMachine().Options.UnsafeFPMath)) {
+ if (SLT == MVT::f32 && (CxtI && CxtI->hasApproxFunc())) {
// Fast unsafe fdiv lowering:
// f32 rcp
// f32 fmul
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 44e65b3..a83caa0 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -180,6 +180,7 @@ public:
ImmTyMatrixBFMT,
ImmTyMatrixAReuse,
ImmTyMatrixBReuse,
+ ImmTyScaleSel,
ImmTyByteSel,
};
@@ -689,6 +690,8 @@ public:
bool isVSrc_v2f16() const { return isVSrc_f16() || isLiteralImm(MVT::v2f16); }
+ bool isVSrc_NoInline_v2f16() const { return isVSrc_v2f16(); }
+
bool isVISrcB32() const {
return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i32);
}
@@ -1182,6 +1185,7 @@ public:
case ImmTyMatrixBFMT: OS << "ImmTyMatrixBFMT"; break;
case ImmTyMatrixAReuse: OS << "ImmTyMatrixAReuse"; break;
case ImmTyMatrixBReuse: OS << "ImmTyMatrixBReuse"; break;
+ case ImmTyScaleSel: OS << "ScaleSel" ; break;
case ImmTyByteSel: OS << "ByteSel" ; break;
}
// clang-format on
@@ -2036,6 +2040,7 @@ static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_KIMM16:
return &APFloat::IEEEhalf();
case AMDGPU::OPERAND_REG_IMM_BF16:
@@ -2405,6 +2410,7 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
case AMDGPU::OPERAND_REG_IMM_V2INT32:
case AMDGPU::OPERAND_KIMM32:
@@ -2456,6 +2462,9 @@ void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyMo
setImmKindConst();
return;
}
+ [[fallthrough]];
+
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
setImmKindLiteral();
@@ -3761,6 +3770,9 @@ bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
OperandType == AMDGPU::OPERAND_REG_INLINE_C_BF16)
return AMDGPU::isInlinableLiteralBF16(Val, hasInv2PiInlineImm());
+ if (OperandType == AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16)
+ return false;
+
llvm_unreachable("invalid operand type");
}
default:
@@ -9356,6 +9368,14 @@ void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
}
}
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::scale_sel))
+ addOptionalImmOperand(Inst, Operands, OptionalIdx,
+ AMDGPUOperand::ImmTyScaleSel);
+
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp))
+ addOptionalImmOperand(Inst, Operands, OptionalIdx,
+ AMDGPUOperand::ImmTyClamp);
+
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel)) {
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vdst_in))
Inst.addOperand(Inst.getOperand(0));
@@ -9363,10 +9383,6 @@ void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
AMDGPUOperand::ImmTyByteSel);
}
- if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp))
- addOptionalImmOperand(Inst, Operands, OptionalIdx,
- AMDGPUOperand::ImmTyClamp);
-
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod))
addOptionalImmOperand(Inst, Operands, OptionalIdx,
AMDGPUOperand::ImmTyOModSI);
@@ -9420,8 +9436,22 @@ void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands,
Opc == AMDGPU::V_CVT_PK_FP8_F32_fake16_e64_dpp8_gfx12 ||
Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx12_e64_dpp_gfx12 ||
Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx12_e64_dpp8_gfx12 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx1250_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F32_gfx1250_e64_dpp8_gfx1250 ||
Opc == AMDGPU::V_CVT_SR_BF8_F32_gfx12_e64_dpp_gfx12 ||
- Opc == AMDGPU::V_CVT_SR_BF8_F32_gfx12_e64_dpp8_gfx12)) {
+ Opc == AMDGPU::V_CVT_SR_BF8_F32_gfx12_e64_dpp8_gfx12 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_t16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_fake16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_t16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_fake16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_t16_e64_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_FP8_F16_fake16_e64_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_t16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_fake16_e64_dpp_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_t16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_fake16_e64_dpp8_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_t16_e64_gfx1250 ||
+ Opc == AMDGPU::V_CVT_SR_BF8_F16_fake16_e64_gfx1250)) {
Inst.addOperand(Inst.getOperand(0));
}
@@ -10016,9 +10046,12 @@ void AMDGPUAsmParser::cvtVOP3DPP(MCInst &Inst, const OperandVector &Operands,
addOptionalImmOperand(Inst, Operands, OptionalIdx,
AMDGPUOperand::ImmTyClamp);
- if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel))
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel)) {
+ if (VdstInIdx == static_cast<int>(Inst.getNumOperands()))
+ Inst.addOperand(Inst.getOperand(0));
addOptionalImmOperand(Inst, Operands, OptionalIdx,
AMDGPUOperand::ImmTyByteSel);
+ }
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod))
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
index ce1ce68..96d5668 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp
@@ -592,10 +592,13 @@ bool GCNMaxILPSchedStrategy::tryCandidate(SchedCandidate &Cand,
// This is a best effort to set things up for a post-RA pass. Optimizations
// like generating loads of multiple registers should ideally be done within
// the scheduler pass by combining the loads during DAG postprocessing.
- const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
- const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
- if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
- CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+ unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
+ bool CandIsClusterSucc =
+ isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+ bool TryCandIsClusterSucc =
+ isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+ if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
Cluster))
return TryCand.Reason != NoCand;
@@ -666,10 +669,13 @@ bool GCNMaxMemoryClauseSchedStrategy::tryCandidate(SchedCandidate &Cand,
// MaxMemoryClause-specific: We prioritize clustered instructions as we would
// get more benefit from clausing these memory instructions.
- const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
- const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
- if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
- CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+ unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
+ bool CandIsClusterSucc =
+ isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+ bool TryCandIsClusterSucc =
+ isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+ if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
Cluster))
return TryCand.Reason != NoCand;
@@ -896,15 +902,10 @@ GCNScheduleDAGMILive::getRegionLiveInMap() const {
assert(!Regions.empty());
std::vector<MachineInstr *> RegionFirstMIs;
RegionFirstMIs.reserve(Regions.size());
- auto I = Regions.rbegin(), E = Regions.rend();
- do {
- const MachineBasicBlock *MBB = I->first->getParent();
- auto *MI = &*skipDebugInstructionsForward(I->first, I->second);
- RegionFirstMIs.push_back(MI);
- do {
- ++I;
- } while (I != E && I->first->getParent() == MBB);
- } while (I != E);
+ for (auto &[RegionBegin, RegionEnd] : reverse(Regions))
+ RegionFirstMIs.push_back(
+ &*skipDebugInstructionsForward(RegionBegin, RegionEnd));
+
return getLiveRegMap(RegionFirstMIs, /*After=*/false, *LIS);
}
@@ -941,11 +942,9 @@ void GCNScheduleDAGMILive::finalizeSchedule() {
Pressure.resize(Regions.size());
RegionsWithHighRP.resize(Regions.size());
RegionsWithExcessRP.resize(Regions.size());
- RegionsWithMinOcc.resize(Regions.size());
RegionsWithIGLPInstrs.resize(Regions.size());
RegionsWithHighRP.reset();
RegionsWithExcessRP.reset();
- RegionsWithMinOcc.reset();
RegionsWithIGLPInstrs.reset();
runSchedStages();
@@ -1095,8 +1094,7 @@ bool PreRARematStage::initGCNSchedStage() {
// fixed if there is another pass after this pass.
assert(!S.hasNextStage());
- if (!GCNSchedStage::initGCNSchedStage() || DAG.RegionsWithMinOcc.none() ||
- DAG.Regions.size() == 1)
+ if (!GCNSchedStage::initGCNSchedStage() || DAG.Regions.size() == 1)
return false;
// Before performing any IR modification record the parent region of each MI
@@ -1138,11 +1136,6 @@ void UnclusteredHighRPStage::finalizeGCNSchedStage() {
SavedMutations.swap(DAG.Mutations);
S.SGPRLimitBias = S.VGPRLimitBias = 0;
if (DAG.MinOccupancy > InitialOccupancy) {
- for (unsigned IDX = 0; IDX < DAG.Pressure.size(); ++IDX)
- DAG.RegionsWithMinOcc[IDX] =
- DAG.Pressure[IDX].getOccupancy(
- DAG.ST, DAG.MFI.getDynamicVGPRBlockSize()) == DAG.MinOccupancy;
-
LLVM_DEBUG(dbgs() << StageID
<< " stage successfully increased occupancy to "
<< DAG.MinOccupancy << '\n');
@@ -1214,11 +1207,15 @@ bool GCNSchedStage::initGCNRegion() {
}
bool UnclusteredHighRPStage::initGCNRegion() {
- // Only reschedule regions with the minimum occupancy or regions that may have
- // spilling (excess register pressure).
- if ((!DAG.RegionsWithMinOcc[RegionIdx] ||
- DAG.MinOccupancy <= InitialOccupancy) &&
- !DAG.RegionsWithExcessRP[RegionIdx])
+ // Only reschedule regions that have excess register pressure (i.e. spilling)
+ // or had minimum occupancy at the beginning of the stage (as long as
+ // rescheduling of previous regions did not make occupancy drop back down to
+ // the initial minimum).
+ unsigned DynamicVGPRBlockSize = DAG.MFI.getDynamicVGPRBlockSize();
+ if (!DAG.RegionsWithExcessRP[RegionIdx] &&
+ (DAG.MinOccupancy <= InitialOccupancy ||
+ DAG.Pressure[RegionIdx].getOccupancy(ST, DynamicVGPRBlockSize) !=
+ InitialOccupancy))
return false;
return GCNSchedStage::initGCNRegion();
@@ -1283,9 +1280,6 @@ void GCNSchedStage::checkScheduling() {
if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
PressureAfter.getVGPRNum(ST.hasGFX90AInsts()) <= S.VGPRCriticalLimit) {
DAG.Pressure[RegionIdx] = PressureAfter;
- DAG.RegionsWithMinOcc[RegionIdx] =
- PressureAfter.getOccupancy(ST, DynamicVGPRBlockSize) ==
- DAG.MinOccupancy;
// Early out if we have achieved the occupancy target.
LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
@@ -1319,7 +1313,6 @@ void GCNSchedStage::checkScheduling() {
if (NewOccupancy < DAG.MinOccupancy) {
DAG.MinOccupancy = NewOccupancy;
MFI.limitOccupancy(DAG.MinOccupancy);
- DAG.RegionsWithMinOcc.reset();
LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
<< DAG.MinOccupancy << ".\n");
}
@@ -1341,14 +1334,10 @@ void GCNSchedStage::checkScheduling() {
// Revert if this region's schedule would cause a drop in occupancy or
// spilling.
- if (shouldRevertScheduling(WavesAfter)) {
+ if (shouldRevertScheduling(WavesAfter))
revertScheduling();
- } else {
+ else
DAG.Pressure[RegionIdx] = PressureAfter;
- DAG.RegionsWithMinOcc[RegionIdx] =
- PressureAfter.getOccupancy(ST, DynamicVGPRBlockSize) ==
- DAG.MinOccupancy;
- }
}
unsigned
@@ -1578,9 +1567,6 @@ bool GCNSchedStage::mayCauseSpilling(unsigned WavesAfter) {
}
void GCNSchedStage::revertScheduling() {
- DAG.RegionsWithMinOcc[RegionIdx] =
- PressureBefore.getOccupancy(ST, DAG.MFI.getDynamicVGPRBlockSize()) ==
- DAG.MinOccupancy;
LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
DAG.RegionEnd = DAG.RegionBegin;
int SkippedDebugInstr = 0;
diff --git a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
index 94cd795..32139a9 100644
--- a/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
+++ b/llvm/lib/Target/AMDGPU/GCNSchedStrategy.h
@@ -250,9 +250,6 @@ class GCNScheduleDAGMILive final : public ScheduleDAGMILive {
// limit. Register pressure in these regions usually will result in spilling.
BitVector RegionsWithExcessRP;
- // Regions that has the same occupancy as the latest MinOccupancy
- BitVector RegionsWithMinOcc;
-
// Regions that have IGLP instructions (SCHED_GROUP_BARRIER or IGLP_OPT).
BitVector RegionsWithIGLPInstrs;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp b/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp
index 0a0a107..0237a60 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp
@@ -340,6 +340,43 @@ void GCNSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
Policy.ShouldTrackLaneMasks = true;
}
+void GCNSubtarget::overridePostRASchedPolicy(MachineSchedPolicy &Policy,
+ const SchedRegion &Region) const {
+ const Function &F = Region.RegionBegin->getMF()->getFunction();
+ Attribute PostRADirectionAttr = F.getFnAttribute("amdgpu-post-ra-direction");
+ if (!PostRADirectionAttr.isValid())
+ return;
+
+ StringRef PostRADirectionStr = PostRADirectionAttr.getValueAsString();
+ if (PostRADirectionStr == "topdown") {
+ Policy.OnlyTopDown = true;
+ Policy.OnlyBottomUp = false;
+ } else if (PostRADirectionStr == "bottomup") {
+ Policy.OnlyTopDown = false;
+ Policy.OnlyBottomUp = true;
+ } else if (PostRADirectionStr == "bidirectional") {
+ Policy.OnlyTopDown = false;
+ Policy.OnlyBottomUp = false;
+ } else {
+ DiagnosticInfoOptimizationFailure Diag(
+ F, F.getSubprogram(), "invalid value for postRA direction attribute");
+ F.getContext().diagnose(Diag);
+ }
+
+ LLVM_DEBUG({
+ const char *DirStr = "default";
+ if (Policy.OnlyTopDown && !Policy.OnlyBottomUp)
+ DirStr = "topdown";
+ else if (!Policy.OnlyTopDown && Policy.OnlyBottomUp)
+ DirStr = "bottomup";
+ else if (!Policy.OnlyTopDown && !Policy.OnlyBottomUp)
+ DirStr = "bidirectional";
+
+ dbgs() << "Post-MI-sched direction (" << F.getName() << "): " << DirStr
+ << '\n';
+ });
+}
+
void GCNSubtarget::mirFileLoaded(MachineFunction &MF) const {
if (isWave32()) {
// Fix implicit $vcc operands after MIParser has verified that they match
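The new hook reads an ordinary string function attribute, so the direction can be chosen per function by a frontend or an earlier pass. A minimal sketch of attaching it (assuming llvm/IR/Function.h; "bottomup" and "bidirectional" are the other values the hook accepts):

    #include "llvm/IR/Function.h"

    // Tags F so the AMDGPU post-RA scheduler runs top-down over its regions.
    void forceTopDownPostRA(llvm::Function &F) {
      F.addFnAttr("amdgpu-post-ra-direction", "topdown");
    }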
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index fba1c5a..6fe3abc 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1041,6 +1041,9 @@ public:
void overrideSchedPolicy(MachineSchedPolicy &Policy,
const SchedRegion &Region) const override;
+ void overridePostRASchedPolicy(MachineSchedPolicy &Policy,
+ const SchedRegion &Region) const override;
+
void mirFileLoaded(MachineFunction &MF) const override;
unsigned getMaxNumUserSGPRs() const {
@@ -1535,6 +1538,9 @@ public:
// \returns true if the target has V_{MIN|MAX}_{I|U}64 instructions.
bool hasIntMinMax64() const { return GFX1250Insts; }
+ // \returns true if the target has V_ADD_{MIN|MAX}_{I|U}32 instructions.
+ bool hasAddMinMaxInsts() const { return GFX1250Insts; }
+
// \returns true if the target has V_PK_ADD_{MIN|MAX}_{I|U}16 instructions.
bool hasPkAddMinMaxInsts() const { return GFX1250Insts; }
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
index 2a920f6..86d56855 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
@@ -149,7 +149,7 @@ void AMDGPUAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
uint32_t Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the bits from
// the fixup value.
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
index 11b072e..42c4d8b 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp
@@ -540,6 +540,8 @@ void AMDGPUInstPrinter::printImmediateV216(uint32_t Imm, uint8_t OpType,
printImmediateBFloat16(static_cast<uint16_t>(Imm), STI, O))
return;
break;
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ break;
default:
llvm_unreachable("bad operand type");
}
@@ -770,6 +772,7 @@ void AMDGPUInstPrinter::printRegularOperand(const MCInst *MI, unsigned OpNo,
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
@@ -1790,4 +1793,14 @@ void AMDGPUInstPrinter::printBitOp3(const MCInst *MI, unsigned OpNo,
O << formatHex(static_cast<uint64_t>(Imm));
}
+void AMDGPUInstPrinter::printScaleSel(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ uint8_t Imm = MI->getOperand(OpNo).getImm();
+ if (!Imm)
+ return;
+
+ O << " scale_sel:" << formatDec(Imm);
+}
+
#include "AMDGPUGenAsmWriter.inc"
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
index e0b7aa5..f6739b14 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.h
@@ -173,6 +173,8 @@ private:
const MCSubtargetInfo &STI, raw_ostream &O,
StringRef Prefix, bool PrintInHex, bool AlwaysPrint);
+ void printScaleSel(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
void printBitOp3(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O);
diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
index c49ad79..f358084 100644
--- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
+++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
@@ -341,6 +341,9 @@ std::optional<uint64_t> AMDGPUMCCodeEmitter::getLitEncoding(
return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
.value_or(255);
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ return 255;
+
case AMDGPU::OPERAND_KIMM32:
case AMDGPU::OPERAND_KIMM16:
case AMDGPU::OPERAND_KIMM64:
diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h
index 40b8bcd..c564145 100644
--- a/llvm/lib/Target/AMDGPU/SIDefines.h
+++ b/llvm/lib/Target/AMDGPU/SIDefines.h
@@ -208,6 +208,7 @@ enum OperandType : unsigned {
OPERAND_REG_IMM_V2BF16,
OPERAND_REG_IMM_V2FP16,
OPERAND_REG_IMM_V2INT16,
+ OPERAND_REG_IMM_NOINLINE_V2FP16,
OPERAND_REG_IMM_V2INT32,
OPERAND_REG_IMM_V2FP32,
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index b77da4d..e934152 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -468,6 +468,7 @@ bool SIFoldOperandsImpl::canUseImmWithOpSel(const MachineInstr *MI,
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f4d7408..4d67e4a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7199,7 +7199,7 @@ SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}
- if (getTargetMachine().Options.UnsafeFPMath) {
+ if (Op->getFlags().hasApproximateFuncs()) {
SDValue Flags = Op.getOperand(1);
SDValue Src32 = DAG.getNode(ISD::FP_ROUND, DL, MVT::f32, Src, Flags);
return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Src32, Flags);
@@ -11294,8 +11294,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
EVT VT = Op.getValueType();
const SDNodeFlags Flags = Op->getFlags();
- bool AllowInaccurateRcp =
- Flags.hasApproximateFuncs() || DAG.getTarget().Options.UnsafeFPMath;
+ bool AllowInaccurateRcp = Flags.hasApproximateFuncs();
if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
// Without !fpmath accuracy information, we can't do more because we don't
@@ -11314,7 +11313,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
// 1.0 / sqrt(x) -> rsq(x)
- // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
+ // XXX - Is afn sufficient to do this for f64? The maximum ULP
// error seems really high at 2^29 ULP.
// 1.0 / x -> rcp(x)
return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
@@ -11348,8 +11347,7 @@ SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op,
EVT VT = Op.getValueType();
const SDNodeFlags Flags = Op->getFlags();
- bool AllowInaccurateDiv =
- Flags.hasApproximateFuncs() || DAG.getTarget().Options.UnsafeFPMath;
+ bool AllowInaccurateDiv = Flags.hasApproximateFuncs();
if (!AllowInaccurateDiv)
return SDValue();
@@ -14601,7 +14599,7 @@ unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
return ISD::FMAD;
const TargetOptions &Options = DAG.getTarget().Options;
- if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
+ if ((Options.AllowFPOpFusion == FPOpFusion::Fast ||
(N0->getFlags().hasAllowContract() &&
N1->getFlags().hasAllowContract())) &&
isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
@@ -15724,9 +15722,9 @@ SDValue SITargetLowering::performFMACombine(SDNode *N,
// fdot2_f32_f16 always flushes fp32 denormal operand and output to zero,
// regardless of the denorm mode setting. Therefore,
- // unsafe-fp-math/fp-contract is sufficient to allow generating fdot2.
+ // fp-contract is sufficient to allow generating fdot2.
const TargetOptions &Options = DAG.getTarget().Options;
- if (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
+ if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
(N->getFlags().hasAllowContract() &&
FMA->getFlags().hasAllowContract())) {
Op1 = Op1.getOperand(0);
@@ -16827,56 +16825,51 @@ SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
return std::pair(0U, RC);
}
- if (Constraint.starts_with("{") && Constraint.ends_with("}")) {
- StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
- if (RegName.consume_front("v")) {
+ auto [Kind, Idx, NumRegs] = AMDGPU::parseAsmConstraintPhysReg(Constraint);
+ if (Kind != '\0') {
+ if (Kind == 'v') {
RC = &AMDGPU::VGPR_32RegClass;
- } else if (RegName.consume_front("s")) {
+ } else if (Kind == 's') {
RC = &AMDGPU::SGPR_32RegClass;
- } else if (RegName.consume_front("a")) {
+ } else if (Kind == 'a') {
RC = &AMDGPU::AGPR_32RegClass;
}
if (RC) {
- uint32_t Idx;
- if (RegName.consume_front("[")) {
- uint32_t End;
- bool Failed = RegName.consumeInteger(10, Idx);
- Failed |= !RegName.consume_front(":");
- Failed |= RegName.consumeInteger(10, End);
- Failed |= !RegName.consume_back("]");
- if (!Failed) {
- uint32_t Width = (End - Idx + 1) * 32;
- // Prohibit constraints for register ranges with a width that does not
- // match the required type.
- if (VT.SimpleTy != MVT::Other && Width != VT.getSizeInBits())
+ if (NumRegs > 1) {
+ if (Idx >= RC->getNumRegs() || Idx + NumRegs - 1 > RC->getNumRegs())
+ return std::pair(0U, nullptr);
+
+ uint32_t Width = NumRegs * 32;
+ // Prohibit constraints for register ranges with a width that does not
+ // match the required type.
+ if (VT.SimpleTy != MVT::Other && Width != VT.getSizeInBits())
+ return std::pair(0U, nullptr);
+
+ MCRegister Reg = RC->getRegister(Idx);
+ if (SIRegisterInfo::isVGPRClass(RC))
+ RC = TRI->getVGPRClassForBitWidth(Width);
+ else if (SIRegisterInfo::isSGPRClass(RC))
+ RC = TRI->getSGPRClassForBitWidth(Width);
+ else if (SIRegisterInfo::isAGPRClass(RC))
+ RC = TRI->getAGPRClassForBitWidth(Width);
+ if (RC) {
+ Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC);
+ if (!Reg) {
+ // The register class does not contain the requested register,
+ // e.g., because it is an SGPR pair that would violate alignment
+ // requirements.
return std::pair(0U, nullptr);
- MCRegister Reg = RC->getRegister(Idx);
- if (SIRegisterInfo::isVGPRClass(RC))
- RC = TRI->getVGPRClassForBitWidth(Width);
- else if (SIRegisterInfo::isSGPRClass(RC))
- RC = TRI->getSGPRClassForBitWidth(Width);
- else if (SIRegisterInfo::isAGPRClass(RC))
- RC = TRI->getAGPRClassForBitWidth(Width);
- if (RC) {
- Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0, RC);
- if (!Reg) {
- // The register class does not contain the requested register,
- // e.g., because it is an SGPR pair that would violate alignment
- // requirements.
- return std::pair(0U, nullptr);
- }
- return std::pair(Reg, RC);
}
+ return std::pair(Reg, RC);
}
- } else {
- // Check for lossy scalar/vector conversions.
- if (VT.isVector() && VT.getSizeInBits() != 32)
- return std::pair(0U, nullptr);
- bool Failed = RegName.getAsInteger(10, Idx);
- if (!Failed && Idx < RC->getNumRegs())
- return std::pair(RC->getRegister(Idx), RC);
}
+
+ // Check for lossy scalar/vector conversions.
+ if (VT.isVector() && VT.getSizeInBits() != 32)
+ return std::pair(0U, nullptr);
+ if (Idx < RC->getNumRegs())
+ return std::pair(RC->getRegister(Idx), RC);
}
}
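
The rework above leaves getRegForInlineAsmConstraint with only the validation half of the job: reject a register range whose indices run off the register file or whose 32-bit-per-register width does not match the requested value type. Below is a condensed C++ sketch of that check, not the LLVM code itself; NumRegsInClass and the fixed 32-bit register width stand in for the real TargetRegisterInfo and register class queries, and valueBits == 0 plays the role of MVT::Other.

#include <cassert>
#include <optional>

constexpr unsigned NumRegsInClass = 256; // illustrative stand-in, e.g. v0..v255

// Returns the starting register if a range constraint is usable for a value
// of `valueBits` bits, or std::nullopt if it must be rejected.
std::optional<unsigned> validateRange(unsigned idx, unsigned numRegs,
                                      unsigned valueBits) {
  if (idx >= NumRegsInClass || idx + numRegs > NumRegsInClass)
    return std::nullopt; // range runs past the end of the register file
  unsigned width = numRegs * 32; // each register is 32 bits wide
  if (valueBits != 0 && width != valueBits)
    return std::nullopt; // width must match the required type
  return idx;
}

int main() {
  assert(validateRange(0, 2, 64).has_value()); // v[0:1] holds a 64-bit value
  assert(!validateRange(0, 2, 32));            // width mismatch
  assert(!validateRange(255, 4, 128));         // runs past the last register
}
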
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index c2da937..044a681 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4438,6 +4438,8 @@ bool SIInstrInfo::isInlineConstant(int64_t Imm, uint8_t OperandType) const {
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
return AMDGPU::isInlinableLiteralV2BF16(Imm);
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ return false;
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
if (isInt<16>(Imm) || isUInt<16>(Imm)) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 83b0490..a3e20ba 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1313,6 +1313,10 @@ def MatrixBFMT : CustomOperand<i32, 1, "MatrixBFMT">;
def MatrixAReuse : NamedBitOperand<"matrix_a_reuse">;
def MatrixBReuse : NamedBitOperand<"matrix_b_reuse">;
+def ScaleSel : NamedIntOperand<"scale_sel"> {
+ let Validator = "isUInt<3>";
+}
+
class KImmFPOperand<ValueType vt> : ImmOperand<vt> {
let OperandNamespace = "AMDGPU";
let OperandType = "OPERAND_KIMM"#vt.Size;
@@ -2859,6 +2863,7 @@ def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>;
def VOP_I16_I16 : VOPProfile <[i16, i16, untyped, untyped]>;
def VOP_BF16_BF16 : VOPProfile<[bf16, bf16, untyped, untyped]>;
def VOP1_I16_I32 : VOPProfile<[i16, i32, untyped, untyped]>;
+def VOP_I16_V2F16 : VOPProfile<[i16, v2f16, untyped, untyped]>;
def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
@@ -2926,6 +2931,8 @@ def VOP_V2BF16_F32_F32 : VOPProfile <[v2bf16, f32, f32, untyped]>;
def VOP_V32F32_V6I32_F32 : VOPProfile <[v32f32, v6i32, f32, untyped]>;
def VOP_V32F16_V6I32_F32 : VOPProfile <[v32f16, v6i32, f32, untyped]>;
def VOP_V32BF16_V6I32_F32 : VOPProfile <[v32bf16, v6i32, f32, untyped]>;
+def VOP_V2BF16_F32_F32_I32 : VOPProfile <[v2bf16, f32, f32, i32]>;
+def VOP_V2F16_F32_F32_I32 : VOPProfile <[v2f16, f32, f32, i32]>;
def VOP_V6I32_V32F16_F32 : VOPProfile<[v6i32, v32f16, f32, untyped]>;
def VOP_V6I32_V32BF16_F32 : VOPProfile<[v6i32, v32bf16, f32, untyped]>;
def VOP_V6I32_V16F32_V16F32_F32 : VOPProfile<[v6i32, v16f32, v16f32, f32]>;
@@ -2941,6 +2948,13 @@ def VOP_BF16_F32_I32 : VOPProfile<[bf16, f32, i32, untyped]>;
def VOP_F16_F32_I32 : VOPProfile<[f16, f32, i32, untyped]>;
def VOP_I32_BF16_I32_F32 : VOPProfile<[i32, bf16, i32, f32]>;
def VOP_I32_F16_I32_F32 : VOPProfile<[i32, f16, i32, f32]>;
+def VOP_V8F16_V2I32_I32 : VOPProfile<[v8f16, v2i32, i32, untyped]>;
+def VOP_V8BF16_V2I32_I32 : VOPProfile<[v8bf16, v2i32, i32, untyped]>;
+def VOP_V8F16_I32_I32 : VOPProfile<[v8f16, i32, i32, untyped]>;
+def VOP_V8BF16_I32_I32 : VOPProfile<[v8bf16, i32, i32, untyped]>;
+def VOP_V16F32_V3I32_I32 : VOPProfile<[v16f32, v3i32, i32, untyped]>;
+def VOP_V8F32_V2I32_I32 : VOPProfile<[v8f32, v2i32, i32, untyped]>;
+def VOP_V8F32_I32_I32 : VOPProfile<[v8f32, i32, i32, untyped]>;
def VOP_I32_F32_I32_F32 : VOPProfile<[i32, f32, i32, f32]>;
def VOP_V6I32_V32BF16_I32_F32 : VOPProfile<[v6i32, v32bf16, i32, f32]>;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 218841d..36d1a3b 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -1218,6 +1218,8 @@ def VSrc_f64 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_FP64"> {
def VSrc_v2b32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2INT32">;
def VSrc_v2f32 : SrcRegOrImm9 <VS_64, "OPERAND_REG_IMM_V2FP32">;
+def VSrc_NoInline_v2f16 : SrcRegOrImm9 <VS_32, "OPERAND_REG_IMM_NOINLINE_V2FP16">;
+
//===----------------------------------------------------------------------===//
// VRegSrc_* Operands with a VGPR
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 83e63ac..65fa088 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -1548,6 +1548,42 @@ bool shouldEmitConstantsToTextSection(const Triple &TT) {
return TT.getArch() == Triple::r600;
}
+static bool isValidRegPrefix(char C) {
+ return C == 'v' || C == 's' || C == 'a';
+}
+
+std::tuple<char, unsigned, unsigned>
+parseAsmConstraintPhysReg(StringRef Constraint) {
+ StringRef RegName = Constraint;
+ if (!RegName.consume_front("{") || !RegName.consume_back("}"))
+ return {};
+
+ char Kind = RegName.front();
+ if (!isValidRegPrefix(Kind))
+ return {};
+
+ RegName = RegName.drop_front();
+ if (RegName.consume_front("[")) {
+ unsigned Idx, End;
+ bool Failed = RegName.consumeInteger(10, Idx);
+ Failed |= !RegName.consume_front(":");
+ Failed |= RegName.consumeInteger(10, End);
+ Failed |= !RegName.consume_back("]");
+ if (!Failed) {
+ unsigned NumRegs = End - Idx + 1;
+ if (NumRegs > 1)
+ return {Kind, Idx, NumRegs};
+ }
+ } else {
+ unsigned Idx;
+ bool Failed = RegName.getAsInteger(10, Idx);
+ if (!Failed)
+ return {Kind, Idx, 1};
+ }
+
+ return {};
+}
+
std::pair<unsigned, unsigned>
getIntegerPairAttribute(const Function &F, StringRef Name,
std::pair<unsigned, unsigned> Default,
@@ -2659,6 +2695,7 @@ bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
@@ -3023,6 +3060,8 @@ bool isInlinableLiteralV216(uint32_t Literal, uint8_t OpType) {
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
return isInlinableLiteralV2BF16(Literal);
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
+ return false;
default:
llvm_unreachable("bad packed operand type");
}
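
parseAsmConstraintPhysReg gives SITargetLowering (see the SIISelLowering.cpp hunk earlier) a single place that understands the "{v5}" / "{s[0:3]}" constraint syntax. The following is a self-contained sketch of the same parsing logic using std::string_view and std::from_chars instead of LLVM's StringRef; parsePhysRegConstraint is an illustrative name, not the LLVM API.

#include <cassert>
#include <charconv>
#include <string_view>
#include <tuple>

// Parse "{v5}" or "{s[0:3]}" into {kind, start index, register count}.
// Anything that is not a physical register constraint yields {0, 0, 0}.
static std::tuple<char, unsigned, unsigned>
parsePhysRegConstraint(std::string_view c) {
  if (c.size() < 3 || c.front() != '{' || c.back() != '}')
    return {};
  c = c.substr(1, c.size() - 2);
  char kind = c.front();
  if (kind != 'v' && kind != 's' && kind != 'a')
    return {};
  c.remove_prefix(1);

  auto parseUInt = [](std::string_view &s, unsigned &out) {
    auto [p, ec] = std::from_chars(s.data(), s.data() + s.size(), out);
    if (ec != std::errc())
      return false;
    s.remove_prefix(size_t(p - s.data()));
    return true;
  };

  if (!c.empty() && c.front() == '[') { // range form: [start:end]
    c.remove_prefix(1);
    unsigned start = 0, end = 0;
    if (!parseUInt(c, start) || c.empty() || c.front() != ':')
      return {};
    c.remove_prefix(1);
    if (!parseUInt(c, end) || c != "]" || end < start)
      return {}; // the end < start test is defensive, not in the LLVM helper
    if (unsigned numRegs = end - start + 1; numRegs > 1)
      return {kind, start, numRegs};
    return {}; // a single-element range is rejected, as in the diff
  }

  unsigned idx = 0; // single register form, e.g. "{v5}"
  if (!parseUInt(c, idx) || !c.empty())
    return {};
  return {kind, idx, 1u};
}

int main() {
  assert(std::get<0>(parsePhysRegConstraint("{v5}")) == 'v');
  assert(std::get<2>(parsePhysRegConstraint("{s[0:3]}")) == 4);
  assert(std::get<0>(parsePhysRegConstraint("{x7}")) == '\0');
}
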
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index c09a9d6..1252e35 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -1012,6 +1012,12 @@ bool isReadOnlySegment(const GlobalValue *GV);
/// target triple \p TT, false otherwise.
bool shouldEmitConstantsToTextSection(const Triple &TT);
+/// Returns a valid char code or 0 in the first entry if this is a valid
+/// physical register constraint, followed by the start register number and
+/// the register width. Does not validate that the number of registers exists
+/// in the class.
+std::tuple<char, unsigned, unsigned>
+parseAsmConstraintPhysReg(StringRef Constraint);
+
/// \returns Integer value requested using \p F's \p Name attribute.
///
/// \returns \p Default if attribute is not present.
@@ -1636,6 +1642,7 @@ inline unsigned getOperandSize(const MCOperandInfo &OpInfo) {
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
+ case AMDGPU::OPERAND_REG_IMM_NOINLINE_V2FP16:
return 2;
default:
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 550ec9d..9de7d6d 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -1344,6 +1344,8 @@ def V_FMAAK_F64 : VOP2_Pseudo<"v_fmaak_f64", VOP_MADAK_F64, [], "">;
} // End SubtargetPredicate = HasFmaakFmamkF64Insts, isReMaterializable = 1, FixedSize = 1, Size = 12, SchedRW = [Write64Bit]
let SubtargetPredicate = HasPkFmacF16Inst in {
+// FIXME: V_PK_FMAC_F16 is currently not used in instruction selection.
+// If this changes, ensure the DPP variant is not used for GFX11+.
defm V_PK_FMAC_F16 : VOP2Inst<"v_pk_fmac_f16", VOP_V2F16_V2F16_V2F16>;
} // End SubtargetPredicate = HasPkFmacF16Inst
@@ -1904,7 +1906,7 @@ multiclass VOP2_Real_FULL_with_name_gfx11_gfx12<bits<6> op, string opName,
VOP2_Real_FULL_with_name<GFX12Gen, op, opName, asmName>;
multiclass VOP2_Real_e32_gfx11_gfx12<bits<6> op> :
- VOP2Only_Real<GFX11Gen, op>, VOP2Only_Real<GFX12Gen, op>;
+ VOP2Only_Real_e32<GFX11Gen, op>, VOP2Only_Real_e32<GFX12Gen, op>;
multiclass VOP3Only_Realtriple_gfx11_gfx12<bits<10> op> :
VOP3Only_Realtriple<GFX11Gen, op>, VOP3Only_Realtriple<GFX12Gen, op>;
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 5586dd8..1ffe39d 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -625,8 +625,9 @@ def shl_0_to_4 : PatFrag<
}];
}
-def VOP3_CVT_PK_F8_F32_Profile : VOP3_Profile<VOP_I32_F32_F32, VOP3_OPSEL> {
- defvar Tail = (ins VGPR_32:$vdst_in, op_sel0:$op_sel);
+class VOP3_CVT_PK_F8_F32_Profile<bit _HasClamp = 0> : VOP3_Profile<VOP_I32_F32_F32, VOP3_OPSEL> {
+ defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)),
+ (ins VGPR_32:$vdst_in, op_sel0:$op_sel));
let InsVOP3OpSel = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
0, HasModifiers, HasSrc2Mods,
HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
@@ -636,12 +637,13 @@ def VOP3_CVT_PK_F8_F32_Profile : VOP3_Profile<VOP_I32_F32_F32, VOP3_OPSEL> {
HasSrc2Mods, HasOMod, Src0ModVOP3DPP, Src1ModVOP3DPP,
Src2ModVOP3DPP, false>.ret,
Tail);
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
let HasExtVOP3DPP = 1;
}
-def VOP3_CVT_PK_F8_F32_Profile_fake16 : VOP3_Profile_Fake16<VOP_I16_F32_F32, VOP3_OPSEL> {
- defvar Tail = (ins VGPR_32:$vdst_in, op_sel0:$op_sel);
+class VOP3_CVT_PK_F8_F32_Profile_fake16<bit _HasClamp = 0> : VOP3_Profile_Fake16<VOP_I16_F32_F32, VOP3_OPSEL> {
+ defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)),
+ (ins VGPR_32:$vdst_in, op_sel0:$op_sel));
let InsVOP3OpSel = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
0, HasModifiers, HasSrc2Mods,
HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
@@ -651,14 +653,15 @@ def VOP3_CVT_PK_F8_F32_Profile_fake16 : VOP3_Profile_Fake16<VOP_I16_F32_F32, VOP
HasSrc2Mods, HasOMod, Src0ModVOP3DPP, Src1ModVOP3DPP,
Src2ModVOP3DPP, false>.ret,
Tail);
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
let HasExtVOP3DPP = 1;
}
// This t16 profile with vdst_in operand is for backward compatibility and is used
// for user controlled packing
-def VOP3_CVT_PK_F8_F32_Profile_t16 : VOP3_Profile_True16<VOP_I16_F32_F32, VOP3_OPSEL> {
- defvar Tail = (ins VGPR_16:$vdst_in, op_sel0:$op_sel);
+class VOP3_CVT_PK_F8_F32_Profile_t16<bit _HasClamp = 0> : VOP3_Profile_True16<VOP_I16_F32_F32, VOP3_OPSEL> {
+ defvar Tail = !con(!if(_HasClamp, (ins Clamp:$clamp), (ins)),
+ (ins VGPR_16:$vdst_in, op_sel0:$op_sel));
let InsVOP3OpSel = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
0, HasModifiers, HasSrc2Mods,
HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
@@ -668,7 +671,7 @@ def VOP3_CVT_PK_F8_F32_Profile_t16 : VOP3_Profile_True16<VOP_I16_F32_F32, VOP3_O
HasSrc2Mods, HasOMod, Src0ModVOP3DPP, Src1ModVOP3DPP,
Src2ModVOP3DPP, false>.ret,
Tail);
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
let HasExtVOP3DPP = 1;
}
@@ -702,10 +705,10 @@ def VOP3_CVT_SR_F8_F32_Profile : VOP3_Profile<VOPProfile<[i32, f32, i32, f32]>,
HasModifiers, DstVT>.ret);
}
-class VOP3_CVT_SR_F8_ByteSel_Profile<ValueType SrcVT> :
+class VOP3_CVT_SR_F8_ByteSel_Profile<ValueType SrcVT, bit _HasClamp = 0> :
VOP3_Profile<VOPProfile<[i32, SrcVT, i32, untyped]>> {
let HasFP8DstByteSel = 1;
- let HasClamp = 0;
+ let HasClamp = _HasClamp;
}
def IsPow2Plus1: PatLeaf<(i32 imm), [{
@@ -746,6 +749,13 @@ let SubtargetPredicate = HasMinimum3Maximum3F16, ReadsModeReg = 0 in {
defm V_MAXIMUM3_F16 : VOP3Inst_t16 <"v_maximum3_f16", VOP_F16_F16_F16_F16, AMDGPUfmaximum3>;
} // End SubtargetPredicate = isGFX12Plus, ReadsModeReg = 0
+let SubtargetPredicate = HasAddMinMaxInsts, isCommutable = 1, isReMaterializable = 1 in {
+ defm V_ADD_MAX_I32 : VOP3Inst <"v_add_max_i32", VOP_I32_I32_I32_I32>;
+ defm V_ADD_MAX_U32 : VOP3Inst <"v_add_max_u32", VOP_I32_I32_I32_I32>;
+ defm V_ADD_MIN_I32 : VOP3Inst <"v_add_min_i32", VOP_I32_I32_I32_I32>;
+ defm V_ADD_MIN_U32 : VOP3Inst <"v_add_min_u32", VOP_I32_I32_I32_I32>;
+}
+
defm V_ADD_I16 : VOP3Inst_t16 <"v_add_i16", VOP_I16_I16_I16>;
defm V_SUB_I16 : VOP3Inst_t16 <"v_sub_i16", VOP_I16_I16_I16>;
@@ -773,15 +783,23 @@ defm V_LSHL_ADD_U64 : VOP3Inst <"v_lshl_add_u64", V_LSHL_ADD_U64_PROF>;
let OtherPredicates = [HasFP8ConversionInsts], mayRaiseFPException = 0,
SchedRW = [WriteFloatCvt] in {
let Constraints = "$vdst = $vdst_in", DisableEncoding = "$vdst_in" in {
- defm V_CVT_PK_FP8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32", VOP3_CVT_PK_F8_F32_Profile,
- VOP3_CVT_PK_F8_F32_Profile_t16,
- VOP3_CVT_PK_F8_F32_Profile_fake16>;
- defm V_CVT_PK_BF8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f32", VOP3_CVT_PK_F8_F32_Profile,
- VOP3_CVT_PK_F8_F32_Profile_t16,
- VOP3_CVT_PK_F8_F32_Profile_fake16>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ defm V_CVT_PK_FP8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32", VOP3_CVT_PK_F8_F32_Profile<>,
+ VOP3_CVT_PK_F8_F32_Profile_t16<>,
+ VOP3_CVT_PK_F8_F32_Profile_fake16<>>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in
+ defm V_CVT_PK_FP8_F32_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f32_gfx1250", VOP3_CVT_PK_F8_F32_Profile<true>,
+ VOP3_CVT_PK_F8_F32_Profile_t16<true>,
+ VOP3_CVT_PK_F8_F32_Profile_fake16<true>>;
+ defm V_CVT_PK_BF8_F32 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f32", VOP3_CVT_PK_F8_F32_Profile<>,
+ VOP3_CVT_PK_F8_F32_Profile_t16<>,
+ VOP3_CVT_PK_F8_F32_Profile_fake16<>>;
let SubtargetPredicate = isGFX12Plus in {
- defm V_CVT_SR_FP8_F32_gfx12 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile<f32>>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ defm V_CVT_SR_FP8_F32_gfx12 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile<f32>>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in
+ defm V_CVT_SR_FP8_F32_gfx1250 : VOP3Inst<"v_cvt_sr_fp8_f32_gfx1250", VOP3_CVT_SR_F8_ByteSel_Profile<f32, true>>;
defm V_CVT_SR_BF8_F32_gfx12 : VOP3Inst<"v_cvt_sr_bf8_f32_gfx12", VOP3_CVT_SR_F8_ByteSel_Profile<f32>>;
}
}
@@ -800,6 +818,11 @@ class Cvt_PK_F8_F32_Pat<SDPatternOperator node, int index, VOP3_Pseudo inst> : G
(inst !if(index, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1, $old, 0)
>;
+class Cvt_PK_F8_F32_E5M3_Pat<SDPatternOperator node, int index, VOP3_Pseudo inst, int Clamp> : GCNPat<
+ (i32 (node f32:$src0, f32:$src1, i32:$old, index)),
+ (inst !if(index, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1, Clamp, $old, 0)
+>;
+
multiclass Cvt_PK_F8_F32_t16_Pat<SDPatternOperator node, VOP3_Pseudo inst> {
def : GCNPat<
(i32 (node f32:$src0, f32:$src1, i32:$old, -1)),
@@ -815,6 +838,21 @@ def : GCNPat<
>;
}
+multiclass Cvt_PK_F8_F32_E5M3_t16_Pat<SDPatternOperator node, VOP3_Pseudo inst, int Clamp> {
+def : GCNPat<
+ (i32 (node f32:$src0, f32:$src1, i32:$old, -1)),
+ (REG_SEQUENCE VGPR_32,
+ (i16 (EXTRACT_SUBREG $old, lo16)), lo16,
+ (i16 (inst SRCMODS.DST_OP_SEL, $src0, 0, $src1, Clamp, (i16 (EXTRACT_SUBREG $old, hi16)), 0)), hi16)
+>;
+def : GCNPat<
+ (i32 (node f32:$src0, f32:$src1, i32:$old, 0)),
+ (REG_SEQUENCE VGPR_32,
+ (i16 (inst 0, $src0, 0, $src1, Clamp, (i16 (EXTRACT_SUBREG $old, lo16)), 0)), lo16,
+ (i16 (EXTRACT_SUBREG $old, hi16)), hi16)
+>;
+}
+
class Cvt_SR_F8_F32_Pat<SDPatternOperator node, bits<2> index, VOP3_Pseudo inst> : GCNPat<
(i32 (node f32:$src0, i32:$src1, i32:$old, index)),
(inst !if(index{1}, SRCMODS.DST_OP_SEL, 0), $src0, 0, $src1,
@@ -827,21 +865,37 @@ class Cvt_SR_F8_ByteSel_Pat<SDPatternOperator node, VOP3_Pseudo inst, ValueType
(inst $src0_modifiers, $src0, $src1_modifiers, $src1, $old, (as_i32timm $byte_sel))
>;
+class Cvt_SR_F8_ByteSel_E5M3_Pat<SDPatternOperator node, VOP3_Pseudo inst,
+ ValueType SrcVT, int Clamp> : GCNPat<
+ (i32 (node (VOP3Mods SrcVT:$src0, i32:$src0_modifiers), (VOP3Mods i32:$src1, i32:$src1_modifiers),
+ i32:$old, timm:$byte_sel)),
+ (inst $src0_modifiers, $src0, $src1_modifiers, $src1, Clamp, $old, (as_i32timm $byte_sel))
+>;
+
let OtherPredicates = [HasFP8ConversionInsts] in {
foreach Index = [0, -1] in {
let True16Predicate = NotHasTrue16BitInsts in {
- def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_e64>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_e64>;
def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_bf8_f32, Index, V_CVT_PK_BF8_F32_e64>;
}
let True16Predicate = UseFakeTrue16Insts in {
def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_fake16_e64>;
def : Cvt_PK_F8_F32_Pat<int_amdgcn_cvt_pk_bf8_f32, Index, V_CVT_PK_BF8_F32_fake16_e64>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in {
+ def : Cvt_PK_F8_F32_E5M3_Pat<int_amdgcn_cvt_pk_fp8_f32, Index, V_CVT_PK_FP8_F32_gfx1250_fake16_e64, DSTCLAMP.NONE>;
+ def : Cvt_PK_F8_F32_E5M3_Pat<int_amdgcn_cvt_pk_fp8_f32_e5m3, Index, V_CVT_PK_FP8_F32_gfx1250_fake16_e64, DSTCLAMP.ENABLE>;
+ }
}
}
let True16Predicate = UseRealTrue16Insts in {
defm : Cvt_PK_F8_F32_t16_Pat<int_amdgcn_cvt_pk_fp8_f32, V_CVT_PK_FP8_F32_t16_e64>;
defm : Cvt_PK_F8_F32_t16_Pat<int_amdgcn_cvt_pk_bf8_f32, V_CVT_PK_BF8_F32_t16_e64>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in {
+ defm : Cvt_PK_F8_F32_E5M3_t16_Pat<int_amdgcn_cvt_pk_fp8_f32, V_CVT_PK_FP8_F32_gfx1250_t16_e64, DSTCLAMP.NONE>;
+ defm : Cvt_PK_F8_F32_E5M3_t16_Pat<int_amdgcn_cvt_pk_fp8_f32_e5m3, V_CVT_PK_FP8_F32_gfx1250_t16_e64, DSTCLAMP.ENABLE>;
+ }
}
let SubtargetPredicate = isGFX940Plus in {
@@ -852,7 +906,12 @@ let SubtargetPredicate = isGFX940Plus in {
}
let SubtargetPredicate = isGFX12Plus in {
- def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f32, V_CVT_SR_FP8_F32_gfx12_e64, f32>;
+ let OtherPredicates = [HasFP8ConversionInsts, NotHasFP8E5M3Insts] in
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f32, V_CVT_SR_FP8_F32_gfx12_e64, f32>;
+ let OtherPredicates = [HasFP8ConversionInsts, HasFP8E5M3Insts] in {
+ def : Cvt_SR_F8_ByteSel_E5M3_Pat<int_amdgcn_cvt_sr_fp8_f32, V_CVT_SR_FP8_F32_gfx1250_e64, f32, DSTCLAMP.NONE>;
+ def : Cvt_SR_F8_ByteSel_E5M3_Pat<int_amdgcn_cvt_sr_fp8_f32_e5m3, V_CVT_SR_FP8_F32_gfx1250_e64, f32, DSTCLAMP.ENABLE>;
+ }
def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_bf8_f32, V_CVT_SR_BF8_F32_gfx12_e64, f32>;
}
}
@@ -885,6 +944,13 @@ def : GCNPat<
(V_LSHL_ADD_U64_e64 VSrc_b64:$src0, VSrc_b32:$src1, VSrc_b64:$src2)
>;
+let SubtargetPredicate = HasAddMinMaxInsts in {
+def : ThreeOp_i32_Pats<add, smax, V_ADD_MAX_I32_e64>;
+def : ThreeOp_i32_Pats<add, umax, V_ADD_MAX_U32_e64>;
+def : ThreeOp_i32_Pats<add, smin, V_ADD_MIN_I32_e64>;
+def : ThreeOp_i32_Pats<add, umin, V_ADD_MIN_U32_e64>;
+}
+
def : VOPBinOpClampPat<saddsat, V_ADD_I32_e64, i32>;
def : VOPBinOpClampPat<ssubsat, V_SUB_I32_e64, i32>;
@@ -999,10 +1065,12 @@ class SrcAndDstSelToOpSelXForm<int modifier_idx, bit dest_sel> : SDNodeXForm<tim
unsigned Val = N->getZExtValue();
unsigned New = 0;
if (}] # modifier_idx # [{ == 0) {
- New = (}] # dest_sel # [{ == 1) ? ((Val & 0x2) ? (SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL) : SISrcMods::DST_OP_SEL)
- : ((Val & 0x2) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE);
- } else if (}] # modifier_idx # [{== 1 || }] # modifier_idx # [{ == 2) {
- New = (Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
+ New = (}] # dest_sel # [{ == 1) ? ((Val & 0x1) ? (SISrcMods::OP_SEL_0 | SISrcMods::DST_OP_SEL) : SISrcMods::DST_OP_SEL)
+ : ((Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE);
+ } else if (}] # modifier_idx # [{== 1) {
+ New = (Val & 0x2) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
+ } else if (}] # modifier_idx # [{== 2) {
+ New = (Val & 0x1) ? SISrcMods::OP_SEL_0 : SISrcMods::NONE;
}
return CurDAG->getTargetConstant(New, SDLoc(N), MVT::i32);
}]>;
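
The SrcAndDstSelToOpSelXForm correction above changes which bit of the src_sel/dst_sel immediate each operand position tests: operand 0 and operand 2 now look at bit 0, while operand 1 looks at bit 1. A plain C++ sketch of the resulting mapping follows; the flag constants are illustrative placeholders, not the real SISrcMods encoding.

#include <cassert>

namespace SrcMods { // illustrative stand-ins for SISrcMods
constexpr unsigned NONE = 0;
constexpr unsigned OP_SEL_0 = 1u << 2;
constexpr unsigned DST_OP_SEL = 1u << 3;
} // namespace SrcMods

// modifierIdx selects which operand the transform was instantiated for.
unsigned toOpSel(unsigned val, int modifierIdx, bool destSel) {
  unsigned mods = SrcMods::NONE;
  if (modifierIdx == 0) {
    if (destSel)
      mods = (val & 0x1) ? (SrcMods::OP_SEL_0 | SrcMods::DST_OP_SEL)
                         : SrcMods::DST_OP_SEL;
    else
      mods = (val & 0x1) ? SrcMods::OP_SEL_0 : SrcMods::NONE;
  } else if (modifierIdx == 1) {
    mods = (val & 0x2) ? SrcMods::OP_SEL_0 : SrcMods::NONE; // tests bit 1
  } else if (modifierIdx == 2) {
    mods = (val & 0x1) ? SrcMods::OP_SEL_0 : SrcMods::NONE; // tests bit 0
  }
  return mods;
}

int main() {
  assert(toOpSel(0x2, 1, false) == SrcMods::OP_SEL_0);
  assert(toOpSel(0x1, 2, false) == SrcMods::OP_SEL_0);
  assert(toOpSel(0x2, 2, false) == SrcMods::NONE);
}
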
@@ -1596,6 +1664,7 @@ def bf16_fpround : PatFrag <(ops node:$src0), (fpround $src0), [{ return true;
let SubtargetPredicate = HasBF16ConversionInsts in {
let ReadsModeReg = 0 in {
defm V_CVT_PK_BF16_F32 : VOP3Inst<"v_cvt_pk_bf16_f32", VOP3_Profile<VOP_V2BF16_F32_F32>>;
+ defm V_CVT_SR_PK_BF16_F32 : VOP3Inst<"v_cvt_sr_pk_bf16_f32", VOP3_Profile<VOP_V2BF16_F32_F32_I32>, int_amdgcn_cvt_sr_pk_bf16_f32>;
}
def : GCNPat<(v2bf16 (bf16_fpround v2f32:$src)),
(V_CVT_PK_BF16_F32_e64 0, (EXTRACT_SUBREG VReg_64:$src, sub0), 0, (EXTRACT_SUBREG VReg_64:$src, sub1))>;
@@ -1606,6 +1675,85 @@ let SubtargetPredicate = HasBF16ConversionInsts in {
(V_CVT_PK_BF16_F32_e64 $src0_modifiers, $src0, 0, (f32 (IMPLICIT_DEF)))>;
}
+class VOP3_CVT_SCALE_PK_F16_F864_Profile<VOPProfile P> : VOP3_CVT_SCALEF32_PK_F864_Profile<P> {
+ let Src0RC64 = getVOP3VRegSrcForVT<Src0VT>.ret;
+ let Ins64 = !con(getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
+ HasClamp, HasModifiers, HasSrc2Mods,
+ HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret,
+ (ins ScaleSel:$scale_sel));
+ let Asm64 = getAsmVOP3Base<NumSrcArgs, HasDst, HasClamp,
+ HasOpSel, HasOMod, IsVOP3P, HasNeg, HasSrc0Mods, HasSrc1Mods,
+ HasSrc2Mods, DstVT>.ret # "$scale_sel";
+}
+
+multiclass VOP3CvtScaleSelInst<string OpName, VOPProfile P, SDPatternOperator node> {
+ def _e64 : VOP3InstBase<OpName, VOP3_CVT_SCALE_PK_F16_F864_Profile<P>> {
+ let Pattern = [(set P.DstVT:$vdst, (node (P.Src0VT (VOP3Mods0 P.Src0VT:$src0)), i32:$src1, i32:$scale_sel))];
+ }
+}
+
+let Src0RC64 = VSrc_NoInline_v2f16 in {
+def VOP3_CVT_PK_F8_F16_Profile : VOP3_Profile<VOP_I16_V2F16>;
+def VOP3_CVT_PK_F8_F16_True16_Profile : VOP3_Profile_True16<VOP3_CVT_PK_F8_F16_Profile>;
+def VOP3_CVT_PK_F8_F16_Fake16_Profile : VOP3_Profile_Fake16<VOP3_CVT_PK_F8_F16_Profile>;
+}
+
+let ReadsModeReg = 0, IsPacked = 0, SubtargetPredicate = isGFX125xOnly in {
+ defm V_CVT_PK_FP8_F16_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_fp8_f16_gfx1250",
+ VOP3_CVT_PK_F8_F16_Profile,
+ VOP3_CVT_PK_F8_F16_True16_Profile,
+ VOP3_CVT_PK_F8_F16_Fake16_Profile,
+ int_amdgcn_cvt_pk_fp8_f16>;
+ defm V_CVT_PK_BF8_F16_gfx1250 : VOP3Inst_t16_with_profiles<"v_cvt_pk_bf8_f16_gfx1250",
+ VOP3_CVT_PK_F8_F16_Profile,
+ VOP3_CVT_PK_F8_F16_True16_Profile,
+ VOP3_CVT_PK_F8_F16_Fake16_Profile,
+ int_amdgcn_cvt_pk_bf8_f16>;
+}
+
+let HasClamp = 0, HasOpSel = 1 in {
+def VOP3_CVT_SR_F8_F16_Profile : VOP3_CVT_SR_F8_ByteSel_Profile<f16>;
+def VOP3_CVT_SR_F8_F16_True16_Profile : VOP3_Profile_True16<VOP3_CVT_SR_F8_F16_Profile>;
+def VOP3_CVT_SR_F8_F16_Fake16_Profile : VOP3_Profile_Fake16<VOP3_CVT_SR_F8_F16_Profile>;
+}
+
+let SubtargetPredicate = isGFX1250Plus in {
+ let ReadsModeReg = 0 in {
+ defm V_CVT_SR_PK_F16_F32 : VOP3Inst<"v_cvt_sr_pk_f16_f32", VOP3_Profile<VOP_V2F16_F32_F32_I32>, int_amdgcn_cvt_sr_pk_f16_f32>;
+
+ // These instructions make non-standard use of op_sel: bits 2 and 3 of op_sel
+ // select a byte in the vdst, while bits 0 and 1 are unused.
+ let Constraints = "$vdst = $vdst_in", DisableEncoding = "$vdst_in" in {
+ defm V_CVT_SR_FP8_F16 : VOP3Inst_t16_with_profiles<"v_cvt_sr_fp8_f16", VOP3_CVT_SR_F8_F16_Profile,
+ VOP3_CVT_SR_F8_F16_True16_Profile, VOP3_CVT_SR_F8_F16_Fake16_Profile>;
+ defm V_CVT_SR_BF8_F16 : VOP3Inst_t16_with_profiles<"v_cvt_sr_bf8_f16", VOP3_CVT_SR_F8_F16_Profile,
+ VOP3_CVT_SR_F8_F16_True16_Profile, VOP3_CVT_SR_F8_F16_Fake16_Profile>;
+ }
+
+ let Constraints = "@earlyclobber $vdst" in {
+ defm V_CVT_SCALE_PK8_F16_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_fp8", VOP_V8F16_V2I32_I32, int_amdgcn_cvt_scale_pk8_f16_fp8>;
+ defm V_CVT_SCALE_PK8_BF16_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_fp8", VOP_V8BF16_V2I32_I32, int_amdgcn_cvt_scale_pk8_bf16_fp8>;
+ defm V_CVT_SCALE_PK8_F16_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_bf8", VOP_V8F16_V2I32_I32, int_amdgcn_cvt_scale_pk8_f16_bf8>;
+ defm V_CVT_SCALE_PK8_BF16_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_bf8", VOP_V8BF16_V2I32_I32, int_amdgcn_cvt_scale_pk8_bf16_bf8>;
+ defm V_CVT_SCALE_PK8_F32_FP8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_fp8", VOP_V8F32_V2I32_I32, int_amdgcn_cvt_scale_pk8_f32_fp8>;
+ defm V_CVT_SCALE_PK8_F32_BF8 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_bf8", VOP_V8F32_V2I32_I32, int_amdgcn_cvt_scale_pk8_f32_bf8>;
+ } // End Constraints = "@earlyclobber $vdst"
+
+ defm V_CVT_SCALE_PK8_F16_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f16_fp4", VOP_V8F16_I32_I32, int_amdgcn_cvt_scale_pk8_f16_fp4>;
+ defm V_CVT_SCALE_PK8_BF16_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_bf16_fp4", VOP_V8BF16_I32_I32, int_amdgcn_cvt_scale_pk8_bf16_fp4>;
+ defm V_CVT_SCALE_PK8_F32_FP4 : VOP3CvtScaleSelInst<"v_cvt_scale_pk8_f32_fp4", VOP_V8F32_I32_I32, int_amdgcn_cvt_scale_pk8_f32_fp4>;
+ } // End ReadsModeReg = 0
+
+ let True16Predicate = UseRealTrue16Insts in {
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f16, V_CVT_SR_FP8_F16_t16_e64, f16>;
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_bf8_f16, V_CVT_SR_BF8_F16_t16_e64, f16>;
+ }
+ let True16Predicate = UseFakeTrue16Insts in {
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_fp8_f16, V_CVT_SR_FP8_F16_fake16_e64, f16>;
+ def : Cvt_SR_F8_ByteSel_Pat<int_amdgcn_cvt_sr_bf8_f16, V_CVT_SR_BF8_F16_fake16_e64, f16>;
+ }
+} // End SubtargetPredicate = isGFX1250Plus
+
class Cvt_Scale_Sr_F32ToBF16F16_Pat<SDPatternOperator node, VOP3_Pseudo inst, ValueType DstTy> : GCNPat<
(DstTy (node DstTy:$vdst_in, f32:$src0, i32:$src1, timm:$word_sel)),
(inst (DstSelToOpSelXForm $word_sel), $src0, 0, $src1, VGPR_32:$vdst_in)
@@ -1821,11 +1969,10 @@ defm V_MIN_U64 : VOP3Only_Realtriple_gfx1250<0x318>;
defm V_MAX_U64 : VOP3Only_Realtriple_gfx1250<0x319>;
defm V_MIN_I64 : VOP3Only_Realtriple_gfx1250<0x31a>;
defm V_MAX_I64 : VOP3Only_Realtriple_gfx1250<0x31b>;
-
-defm V_CVT_PK_FP8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x369, "v_cvt_pk_fp8_f32">;
-defm V_CVT_PK_BF8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx12<0x36a, "v_cvt_pk_bf8_f32">;
-defm V_CVT_SR_FP8_F32_gfx12 : VOP3_Realtriple_with_name_gfx12<0x36b, "V_CVT_SR_FP8_F32_gfx12", "v_cvt_sr_fp8_f32" >;
-defm V_CVT_SR_BF8_F32_gfx12 : VOP3_Realtriple_with_name_gfx12<0x36c, "V_CVT_SR_BF8_F32_gfx12", "v_cvt_sr_bf8_f32">;
+defm V_ADD_MAX_I32 : VOP3Only_Realtriple_gfx1250<0x25e>;
+defm V_ADD_MAX_U32 : VOP3Only_Realtriple_gfx1250<0x25f>;
+defm V_ADD_MIN_I32 : VOP3Only_Realtriple_gfx1250<0x260>;
+defm V_ADD_MIN_U32 : VOP3Only_Realtriple_gfx1250<0x261>;
//===----------------------------------------------------------------------===//
// GFX11, GFX12
@@ -1987,6 +2134,13 @@ defm V_AND_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x36
defm V_OR_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x363, "v_or_b16">;
defm V_XOR_B16 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x364, "v_xor_b16">;
+defm V_CVT_PK_FP8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12_not_gfx1250<0x369, "v_cvt_pk_fp8_f32">;
+defm V_CVT_PK_FP8_F32_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x369, "v_cvt_pk_fp8_f32">;
+defm V_CVT_PK_BF8_F32 : VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12<0x36a, "v_cvt_pk_bf8_f32">;
+defm V_CVT_SR_FP8_F32_gfx12 : VOP3_Realtriple_with_name_gfx11_gfx12_not_gfx1250<0x36b, "V_CVT_SR_FP8_F32_gfx12", "v_cvt_sr_fp8_f32">;
+defm V_CVT_SR_FP8_F32_gfx1250 : VOP3Only_Realtriple_with_name_gfx1250<0x36b, "V_CVT_SR_FP8_F32_gfx1250", "v_cvt_sr_fp8_f32">;
+defm V_CVT_SR_BF8_F32_gfx12 : VOP3_Realtriple_with_name_gfx11_gfx12<0x36c, "V_CVT_SR_BF8_F32_gfx12", "v_cvt_sr_bf8_f32">;
+
let AssemblerPredicate = isGFX11Plus in {
def : AMDGPUMnemonicAlias<"v_add3_nc_u32", "v_add3_u32">;
def : AMDGPUMnemonicAlias<"v_xor_add_u32", "v_xad_u32">;
@@ -1994,7 +2148,25 @@ let AssemblerPredicate = isGFX11Plus in {
// These instructions differ from GFX12 variant by supporting DPP:
defm V_LSHL_ADD_U64 : VOP3Only_Realtriple_gfx1250<0x252>;
+defm V_ASHR_PK_I8_I32 : VOP3Only_Realtriple_gfx1250<0x290>;
+defm V_ASHR_PK_U8_I32 : VOP3Only_Realtriple_gfx1250<0x291>;
+defm V_CVT_SCALE_PK8_F16_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x29f>;
+defm V_CVT_SCALE_PK8_BF16_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x2a0>;
+defm V_CVT_SCALE_PK8_F32_FP4 : VOP3Only_ScaleSel_Real_gfx1250<0x2a1>;
+defm V_CVT_SCALE_PK8_F16_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2a8>;
+defm V_CVT_SCALE_PK8_BF16_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2a9>;
+defm V_CVT_SCALE_PK8_F32_FP8 : VOP3Only_ScaleSel_Real_gfx1250<0x2aa>;
+defm V_CVT_SCALE_PK8_F16_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ab>;
+defm V_CVT_SCALE_PK8_BF16_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ac>;
+defm V_CVT_SCALE_PK8_F32_BF8 : VOP3Only_ScaleSel_Real_gfx1250<0x2ad>;
defm V_CVT_PK_BF16_F32 : VOP3Only_Realtriple_gfx1250<0x36d>;
+defm V_CVT_SR_PK_BF16_F32 : VOP3Only_Realtriple_gfx1250<0x36e>;
+defm V_CVT_PK_F16_F32 : VOP3Only_Realtriple_gfx1250<0x36f>;
+defm V_CVT_SR_PK_F16_F32 : VOP3Only_Realtriple_gfx1250<0x370>;
+defm V_CVT_PK_FP8_F16_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x372, "v_cvt_pk_fp8_f16">;
+defm V_CVT_PK_BF8_F16_gfx1250 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x373, "v_cvt_pk_bf8_f16">;
+defm V_CVT_SR_FP8_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x374>;
+defm V_CVT_SR_BF8_F16 : VOP3Only_Realtriple_t16_and_fake16_gfx1250<0x375>;
//===----------------------------------------------------------------------===//
// GFX10.
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index badbba9..f027ab0 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -414,6 +414,13 @@ class VOP3a_BITOP3_gfx12<bits<10> op, VOPProfile p> : VOP3e_gfx11_gfx12<op, p> {
let Inst{14} = !if(p.HasOpSel, src0_modifiers{3}, 0);
}
+class VOP3a_ScaleSel_gfx1250<bits<10> op, VOPProfile p> : VOP3e_gfx11_gfx12<op, p> {
+ bits<3> scale_sel;
+
+ let Inst{13-11} = scale_sel;
+ let Inst{14} = 0;
+}
+
class VOP3Interp_gfx10<bits<10> op, VOPProfile p> : VOP3e_gfx10<op, p> {
bits<6> attr;
bits<2> attrchan;
@@ -2010,6 +2017,30 @@ multiclass VOP3_BITOP3_Real_Base<GFXGen Gen, bits<10> op, string asmName> {
}
}
+multiclass VOP3Only_ScaleSel_Real_gfx1250<bits<10> op> {
+ defvar ps = !cast<VOP_Pseudo>(NAME#"_e64");
+ def _e64_gfx1250 :
+ VOP3_Real_Gen<ps, GFX1250Gen>,
+ VOP3a_ScaleSel_gfx1250<op, ps.Pfl>;
+}
+
+multiclass VOP3Only_Realtriple_t16_gfx11_gfx12_not_gfx1250<bits<10> op, string asmName, string opName = NAME,
+ string pseudo_mnemonic = "", bit isSingle = 0> :
+ VOP3_Realtriple_with_name<GFX11Gen, op, opName, asmName, pseudo_mnemonic, isSingle>,
+ VOP3_Realtriple_with_name<GFX12Not12_50Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
+
+multiclass VOP3Only_Realtriple_t16_and_fake16_gfx11_gfx12_not_gfx1250<bits<10> op, string asmName,
+ string opName = NAME, string pseudo_mnemonic = ""> {
+ defm _t16 : VOP3Only_Realtriple_t16_gfx11_gfx12_not_gfx1250<op, asmName, opName#"_t16", pseudo_mnemonic, 1>;
+ defm _fake16 : VOP3Only_Realtriple_t16_gfx11_gfx12_not_gfx1250<op, asmName, opName#"_fake16", pseudo_mnemonic, 1>;
+}
+
+multiclass VOP3_Realtriple_with_name_gfx11_gfx12_not_gfx1250<bits<10> op, string opName,
+ string asmName, string pseudo_mnemonic = "",
+ bit isSingle = 0> :
+ VOP3_Realtriple_with_name<GFX11Gen, op, opName, asmName, pseudo_mnemonic, isSingle>,
+ VOP3_Realtriple_with_name<GFX12Not12_50Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
+
//===----------------------------------------------------------------------===//
// VOP3 GFX11
//===----------------------------------------------------------------------===//
@@ -2071,6 +2102,15 @@ multiclass VOP3Only_Real_Base_gfx1250<bits<10> op> :
multiclass VOP3Only_Realtriple_gfx1250<bits<10> op, bit isSingle = 0> :
VOP3_Realtriple<GFX1250Gen, op, isSingle>;
+multiclass VOP3Only_Realtriple_with_name_gfx1250<bits<10> op, string opName,
+ string asmName, string pseudo_mnemonic = "",
+ bit isSingle = 0> :
+ VOP3_Realtriple_with_name<GFX1250Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
+
+multiclass VOP3Only_Realtriple_t16_gfx1250<bits<10> op, string asmName = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
+ string opName = NAME, string pseudo_mnemonic = "", bit isSingle = 0> :
+ VOP3Only_Realtriple_with_name_gfx1250<op, opName, asmName, pseudo_mnemonic, isSingle>;
+
multiclass VOP3_Realtriple_t16_gfx12<bits<10> op, string asmName, string opName = NAME,
string pseudo_mnemonic = "", bit isSingle = 0> :
VOP3_Realtriple_with_name<GFX12Gen, op, opName, asmName, pseudo_mnemonic, isSingle>;
@@ -2091,6 +2131,13 @@ multiclass VOP3Only_Realtriple_t16_and_fake16_gfx12<bits<10> op, string asmName,
defm _fake16 : VOP3Only_Realtriple_t16_gfx12<op, asmName, opName#"_fake16", pseudo_mnemonic>;
}
+multiclass VOP3Only_Realtriple_t16_and_fake16_gfx1250<bits<10> op,
+ string asmName = !cast<VOP3_Pseudo>(NAME#"_e64").Mnemonic,
+ string opName = NAME, string pseudo_mnemonic = ""> {
+ defm _t16 : VOP3Only_Realtriple_t16_gfx1250<op, asmName, opName#"_t16", pseudo_mnemonic>;
+ defm _fake16 : VOP3Only_Realtriple_t16_gfx1250<op, asmName, opName#"_fake16", pseudo_mnemonic>;
+}
+
multiclass VOP3be_Real_with_name_gfx12<bits<10> op, string opName,
string asmName, bit isSingle = 0> {
defvar ps = !cast<VOP3_Pseudo>(opName#"_e64");
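
VOP3a_ScaleSel_gfx1250 above places the three-bit scale_sel operand into bits 13..11 of the encoding and forces bit 14 to zero (the operand itself is validated as isUInt<3> back in SIInstrInfo.td). A tiny sketch of that field packing, with encodeScaleSel as an illustrative helper name:

#include <cassert>
#include <cstdint>

// Pack a 3-bit scale_sel value into bits [13:11] of an encoding word.
uint64_t encodeScaleSel(uint64_t inst, unsigned scaleSel) {
  assert(scaleSel < 8 && "scale_sel must satisfy isUInt<3>");
  inst &= ~(0x7ull << 11);          // clear Inst{13-11}
  inst |= uint64_t(scaleSel) << 11; // insert the field
  inst &= ~(1ull << 14);            // Inst{14} is forced to 0
  return inst;
}

int main() {
  assert(((encodeScaleSel(0, 5) >> 11) & 0x7) == 5);
}
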
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 066b392..bd4b75f 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2423,6 +2423,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CallingConv::ID CallConv = CLI.CallConv;
bool doesNotRet = CLI.DoesNotReturn;
bool isVarArg = CLI.IsVarArg;
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -2446,6 +2447,10 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
!Subtarget->noBTIAtReturnTwice())
GuardWithBTI = AFI->branchTargetEnforcement();
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
+
// Determine whether this is a non-secure function call.
if (CLI.CB && CLI.CB->getAttributes().hasFnAttr("cmse_nonsecure_call"))
isCmseNSCall = true;
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 146fc67..dfa3de3c 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -1125,7 +1125,7 @@ void ARMAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
const unsigned NumBytes = getFixupKindNumBytes(Kind);
unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// Used to point to big endian bytes.
unsigned FullSizeBytes;
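
The same hardening recurs in the AVR, CSKY, Hexagon, LoongArch, M68k, and MSP430 backends below: the bound for the fixup write is now taken from the fragment itself (F.getSize()) rather than from the Data buffer. A generic sketch of the patch-bytes pattern that this assert protects, assuming the little-endian masking most of these backends use:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Mask `value` into `numBytes` bytes of `data` starting at `offset`,
// little-endian, asserting the write stays inside the fragment.
void applyFixupBytes(uint8_t *data, size_t fragmentSize, unsigned offset,
                     unsigned numBytes, uint64_t value) {
  assert(offset + numBytes <= fragmentSize && "Invalid fixup offset!");
  for (unsigned i = 0; i != numBytes; ++i)
    data[offset + i] |= uint8_t((value >> (i * 8)) & 0xff);
}

int main() {
  uint8_t buf[4] = {0, 0, 0, 0};
  applyFixupBytes(buf, sizeof(buf), 0, 2, 0x1234);
  assert(buf[0] == 0x34 && buf[1] == 0x12);
}
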
diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
index 128cc0b..38444f9 100644
--- a/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
+++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRAsmBackend.cpp
@@ -398,7 +398,7 @@ void AVRAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
Value <<= Info.TargetOffset;
unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value.
diff --git a/llvm/lib/Target/CSKY/MCTargetDesc/CSKYAsmBackend.cpp b/llvm/lib/Target/CSKY/MCTargetDesc/CSKYAsmBackend.cpp
index 694d9ea..1bd82fad 100644
--- a/llvm/lib/Target/CSKY/MCTargetDesc/CSKYAsmBackend.cpp
+++ b/llvm/lib/Target/CSKY/MCTargetDesc/CSKYAsmBackend.cpp
@@ -220,7 +220,7 @@ void CSKYAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
unsigned Offset = Fixup.getOffset();
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value.
diff --git a/llvm/lib/Target/DirectX/DXILRootSignature.cpp b/llvm/lib/Target/DirectX/DXILRootSignature.cpp
index ebdfcaa..a4f5086 100644
--- a/llvm/lib/Target/DirectX/DXILRootSignature.cpp
+++ b/llvm/lib/Target/DirectX/DXILRootSignature.cpp
@@ -17,7 +17,6 @@
#include "llvm/Analysis/DXILMetadataAnalysis.h"
#include "llvm/BinaryFormat/DXContainer.h"
#include "llvm/Frontend/HLSL/RootSignatureMetadata.h"
-#include "llvm/Frontend/HLSL/RootSignatureValidations.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
@@ -111,14 +110,25 @@ analyzeModule(Module &M) {
reportError(Ctx, "Root Element is not a metadata node.");
continue;
}
- mcdxbc::RootSignatureDesc RSD;
- if (std::optional<uint32_t> Version = extractMdIntValue(RSDefNode, 2))
- RSD.Version = *Version;
- else {
+ std::optional<uint32_t> V = extractMdIntValue(RSDefNode, 2);
+ if (!V.has_value()) {
reportError(Ctx, "Invalid RSDefNode value, expected constant int");
continue;
}
+ llvm::hlsl::rootsig::MetadataParser MDParser(RootElementListNode);
+ llvm::Expected<mcdxbc::RootSignatureDesc> RSDOrErr =
+ MDParser.ParseRootSignature(V.value());
+
+ if (!RSDOrErr) {
+ handleAllErrors(RSDOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ Ctx->emitError(EIB.message());
+ });
+ continue;
+ }
+
+ auto &RSD = *RSDOrErr;
+
// Clang emits the root signature data in dxcontainer following a specific
// sequence. First the header, then the root parameters. So the header
  // offset will always equal the header size.
@@ -127,12 +137,6 @@ analyzeModule(Module &M) {
  // static sampler offset is calculated when writing the dxcontainer.
RSD.StaticSamplersOffset = 0u;
- hlsl::rootsig::MetadataParser MDParser(RootElementListNode);
-
- if (MDParser.ParseRootSignature(Ctx, RSD)) {
- return RSDMap;
- }
-
RSDMap.insert(std::make_pair(F, RSD));
}
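
The DXIL change above switches the metadata parser to LLVM's Expected<T> interface, so parse failures travel as llvm::Error values instead of a bool plus an early return. A minimal sketch of consuming such a result: llvm::Expected, takeError, and handleAllErrors are real LLVM APIs, while ParsedThing, parseThing, and consume are placeholders for this illustration.

#include "llvm/Support/Error.h"
#include <string>

struct ParsedThing { int Value; };

// Placeholder for a parser that can fail, in the style of
// MetadataParser::ParseRootSignature.
llvm::Expected<ParsedThing> parseThing(bool ok) {
  if (!ok)
    return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                   "invalid node");
  return ParsedThing{42};
}

bool consume(bool ok, std::string &diag) {
  llvm::Expected<ParsedThing> R = parseThing(ok);
  if (!R) {
    // Drain every error in the chain, turning each into a diagnostic.
    llvm::handleAllErrors(R.takeError(), [&](llvm::ErrorInfoBase &EIB) {
      diag += EIB.message();
    });
    return false;
  }
  return R->Value == 42;
}

int main() {
  std::string diag;
  bool ok = consume(true, diag);
  bool bad = consume(false, diag);
  return (ok && !bad && diag == "invalid node") ? 0 : 1;
}
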
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 7d3074b..d5b7a75 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -669,7 +669,7 @@ void HexagonAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
// to a real offset before we can use it.
uint32_t Offset = Fixup.getOffset();
unsigned NumBytes = getFixupKindNumBytes(Kind);
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
char *InstAddr = Data.data() + Offset;
Value = adjustFixupValue(Kind, FixupValue);
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index d9ea88c..858f3d0 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -169,7 +169,7 @@ void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
unsigned Offset = Fixup.getOffset();
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value.
for (unsigned I = 0; I != NumBytes; ++I) {
diff --git a/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp b/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
index 5e03903..7ef705d 100644
--- a/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
+++ b/llvm/lib/Target/M68k/MCTargetDesc/M68kAsmBackend.cpp
@@ -85,7 +85,7 @@ void M68kAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
Asm->getWriter().recordRelocation(F, Fixup, Target, Value);
unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
- assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
+ assert(Fixup.getOffset() + Size <= F.getSize() && "Invalid fixup offset!");
  // Check that upper bits are either all zeros or all ones.
// Specifically ignore overflow/underflow as long as the leakage is
// limited to the lower bits. This is to remain compatible with
diff --git a/llvm/lib/Target/MSP430/MCTargetDesc/MSP430AsmBackend.cpp b/llvm/lib/Target/MSP430/MCTargetDesc/MSP430AsmBackend.cpp
index 29e5bfa..b513503 100644
--- a/llvm/lib/Target/MSP430/MCTargetDesc/MSP430AsmBackend.cpp
+++ b/llvm/lib/Target/MSP430/MCTargetDesc/MSP430AsmBackend.cpp
@@ -120,7 +120,7 @@ void MSP430AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
unsigned Offset = Fixup.getOffset();
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value.
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
index 7a8395a..d9680c7 100644
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp
@@ -1034,14 +1034,12 @@ MCELFStreamer &MipsTargetELFStreamer::getStreamer() {
void MipsTargetELFStreamer::emitGPRel32Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(4);
S.addFixup(Value, Mips::fixup_Mips_GPREL32);
S.appendContents(4, 0);
}
void MipsTargetELFStreamer::emitGPRel64Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(8);
  // fixup_Mips_GPREL32 designates R_MIPS_GPREL32+R_MIPS_64 on MIPS64.
S.addFixup(Value, Mips::fixup_Mips_GPREL32);
S.appendContents(8, 0);
@@ -1049,28 +1047,24 @@ void MipsTargetELFStreamer::emitGPRel64Value(const MCExpr *Value) {
void MipsTargetELFStreamer::emitDTPRel32Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(4);
S.addFixup(Value, Mips::fixup_Mips_DTPREL32);
S.appendContents(4, 0);
}
void MipsTargetELFStreamer::emitDTPRel64Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(8);
S.addFixup(Value, Mips::fixup_Mips_DTPREL64);
S.appendContents(8, 0);
}
void MipsTargetELFStreamer::emitTPRel32Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(4);
S.addFixup(Value, Mips::fixup_Mips_TPREL32);
S.appendContents(4, 0);
}
void MipsTargetELFStreamer::emitTPRel64Value(const MCExpr *Value) {
auto &S = getStreamer();
- S.ensureHeadroom(8);
S.addFixup(Value, Mips::fixup_Mips_TPREL64);
S.appendContents(8, 0);
}
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index ec6b382..881ba8e 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3341,6 +3341,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
bool &IsTailCall = CLI.IsTailCall;
CallingConv::ID CallConv = CLI.CallConv;
bool IsVarArg = CLI.IsVarArg;
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -3397,8 +3398,11 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Get a count of how many bytes are to be pushed on the stack.
unsigned StackSize = CCInfo.getStackSize();
- // Call site info for function parameters tracking.
+ // Call site info for function parameter tracking and call base type info.
MachineFunction::CallSiteInfo CSInfo;
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
// Check if it's really possible to do a tail call. Restrict it to functions
// that are part of this compilation unit.
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
index 8eec915..ee1ca45 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.cpp
@@ -391,16 +391,6 @@ void NVPTXInstPrinter::printMemOperand(const MCInst *MI, int OpNum,
}
}
-void NVPTXInstPrinter::printOffseti32imm(const MCInst *MI, int OpNum,
- raw_ostream &O) {
- auto &Op = MI->getOperand(OpNum);
- assert(Op.isImm() && "Invalid operand");
- if (Op.getImm() != 0) {
- O << "+";
- printOperand(MI, OpNum, O);
- }
-}
-
void NVPTXInstPrinter::printHexu32imm(const MCInst *MI, int OpNum,
raw_ostream &O) {
int64_t Imm = MI->getOperand(OpNum).getImm();
diff --git a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
index c3ff346..92155b0 100644
--- a/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
+++ b/llvm/lib/Target/NVPTX/MCTargetDesc/NVPTXInstPrinter.h
@@ -46,7 +46,6 @@ public:
StringRef Modifier = {});
void printMemOperand(const MCInst *MI, int OpNum, raw_ostream &O,
StringRef Modifier = {});
- void printOffseti32imm(const MCInst *MI, int OpNum, raw_ostream &O);
void printHexu32imm(const MCInst *MI, int OpNum, raw_ostream &O);
void printProtoIdent(const MCInst *MI, int OpNum, raw_ostream &O);
void printPrmtMode(const MCInst *MI, int OpNum, raw_ostream &O);
diff --git a/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp b/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
index cd40481..a349609 100644
--- a/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXForwardParams.cpp
@@ -56,15 +56,12 @@ static bool traverseMoveUse(MachineInstr &U, const MachineRegisterInfo &MRI,
case NVPTX::LD_i16:
case NVPTX::LD_i32:
case NVPTX::LD_i64:
- case NVPTX::LD_i8:
case NVPTX::LDV_i16_v2:
case NVPTX::LDV_i16_v4:
case NVPTX::LDV_i32_v2:
case NVPTX::LDV_i32_v4:
case NVPTX::LDV_i64_v2:
- case NVPTX::LDV_i64_v4:
- case NVPTX::LDV_i8_v2:
- case NVPTX::LDV_i8_v4: {
+ case NVPTX::LDV_i64_v4: {
LoadInsts.push_back(&U);
return true;
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 95abcde..6068035 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1003,14 +1003,10 @@ void NVPTXDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
// Helper function template to reduce amount of boilerplate code for
// opcode selection.
static std::optional<unsigned>
-pickOpcodeForVT(MVT::SimpleValueType VT, std::optional<unsigned> Opcode_i8,
- std::optional<unsigned> Opcode_i16,
+pickOpcodeForVT(MVT::SimpleValueType VT, std::optional<unsigned> Opcode_i16,
std::optional<unsigned> Opcode_i32,
std::optional<unsigned> Opcode_i64) {
switch (VT) {
- case MVT::i1:
- case MVT::i8:
- return Opcode_i8;
case MVT::f16:
case MVT::i16:
case MVT::bf16:
@@ -1078,8 +1074,8 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
Chain};
const MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
- const std::optional<unsigned> Opcode = pickOpcodeForVT(
- TargetVT, NVPTX::LD_i8, NVPTX::LD_i16, NVPTX::LD_i32, NVPTX::LD_i64);
+ const std::optional<unsigned> Opcode =
+ pickOpcodeForVT(TargetVT, NVPTX::LD_i16, NVPTX::LD_i32, NVPTX::LD_i64);
if (!Opcode)
return false;
@@ -1164,17 +1160,15 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
default:
llvm_unreachable("Unexpected opcode");
case NVPTXISD::LoadV2:
- Opcode =
- pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i8_v2, NVPTX::LDV_i16_v2,
- NVPTX::LDV_i32_v2, NVPTX::LDV_i64_v2);
+ Opcode = pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i16_v2,
+ NVPTX::LDV_i32_v2, NVPTX::LDV_i64_v2);
break;
case NVPTXISD::LoadV4:
- Opcode =
- pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i8_v4, NVPTX::LDV_i16_v4,
- NVPTX::LDV_i32_v4, NVPTX::LDV_i64_v4);
+ Opcode = pickOpcodeForVT(EltVT.SimpleTy, NVPTX::LDV_i16_v4,
+ NVPTX::LDV_i32_v4, NVPTX::LDV_i64_v4);
break;
case NVPTXISD::LoadV8:
- Opcode = pickOpcodeForVT(EltVT.SimpleTy, {/* no v8i8 */}, {/* no v8i16 */},
+ Opcode = pickOpcodeForVT(EltVT.SimpleTy, {/* no v8i16 */},
NVPTX::LDV_i32_v8, {/* no v8i64 */});
break;
}
@@ -1230,22 +1224,21 @@ bool NVPTXDAGToDAGISel::tryLDG(MemSDNode *LD) {
default:
llvm_unreachable("Unexpected opcode");
case ISD::LOAD:
- Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_i8,
- NVPTX::LD_GLOBAL_NC_i16, NVPTX::LD_GLOBAL_NC_i32,
- NVPTX::LD_GLOBAL_NC_i64);
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_i16,
+ NVPTX::LD_GLOBAL_NC_i32, NVPTX::LD_GLOBAL_NC_i64);
break;
case NVPTXISD::LoadV2:
- Opcode = pickOpcodeForVT(
- TargetVT, NVPTX::LD_GLOBAL_NC_v2i8, NVPTX::LD_GLOBAL_NC_v2i16,
- NVPTX::LD_GLOBAL_NC_v2i32, NVPTX::LD_GLOBAL_NC_v2i64);
+ Opcode =
+ pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_v2i16,
+ NVPTX::LD_GLOBAL_NC_v2i32, NVPTX::LD_GLOBAL_NC_v2i64);
break;
case NVPTXISD::LoadV4:
- Opcode = pickOpcodeForVT(
- TargetVT, NVPTX::LD_GLOBAL_NC_v4i8, NVPTX::LD_GLOBAL_NC_v4i16,
- NVPTX::LD_GLOBAL_NC_v4i32, NVPTX::LD_GLOBAL_NC_v4i64);
+ Opcode =
+ pickOpcodeForVT(TargetVT, NVPTX::LD_GLOBAL_NC_v4i16,
+ NVPTX::LD_GLOBAL_NC_v4i32, NVPTX::LD_GLOBAL_NC_v4i64);
break;
case NVPTXISD::LoadV8:
- Opcode = pickOpcodeForVT(TargetVT, {/* no v8i8 */}, {/* no v8i16 */},
+ Opcode = pickOpcodeForVT(TargetVT, {/* no v8i16 */},
NVPTX::LD_GLOBAL_NC_v8i32, {/* no v8i64 */});
break;
}
@@ -1276,8 +1269,9 @@ bool NVPTXDAGToDAGISel::tryLDU(SDNode *N) {
break;
}
- const MVT::SimpleValueType SelectVT =
- MVT::getIntegerVT(LD->getMemoryVT().getSizeInBits() / NumElts).SimpleTy;
+ SDLoc DL(N);
+ const unsigned FromTypeWidth = LD->getMemoryVT().getSizeInBits() / NumElts;
+ const MVT::SimpleValueType TargetVT = LD->getSimpleValueType(0).SimpleTy;
// If this is an LDU intrinsic, the address is the third operand. If its an
// LDU SD node (from custom vector handling), then its the second operand
@@ -1286,32 +1280,28 @@ bool NVPTXDAGToDAGISel::tryLDU(SDNode *N) {
SDValue Base, Offset;
SelectADDR(Addr, Base, Offset);
- SDValue Ops[] = {Base, Offset, LD->getChain()};
+ SDValue Ops[] = {getI32Imm(FromTypeWidth, DL), Base, Offset, LD->getChain()};
std::optional<unsigned> Opcode;
switch (N->getOpcode()) {
default:
llvm_unreachable("Unexpected opcode");
case ISD::INTRINSIC_W_CHAIN:
- Opcode =
- pickOpcodeForVT(SelectVT, NVPTX::LDU_GLOBAL_i8, NVPTX::LDU_GLOBAL_i16,
- NVPTX::LDU_GLOBAL_i32, NVPTX::LDU_GLOBAL_i64);
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LDU_GLOBAL_i16,
+ NVPTX::LDU_GLOBAL_i32, NVPTX::LDU_GLOBAL_i64);
break;
case NVPTXISD::LDUV2:
- Opcode = pickOpcodeForVT(SelectVT, NVPTX::LDU_GLOBAL_v2i8,
- NVPTX::LDU_GLOBAL_v2i16, NVPTX::LDU_GLOBAL_v2i32,
- NVPTX::LDU_GLOBAL_v2i64);
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LDU_GLOBAL_v2i16,
+ NVPTX::LDU_GLOBAL_v2i32, NVPTX::LDU_GLOBAL_v2i64);
break;
case NVPTXISD::LDUV4:
- Opcode = pickOpcodeForVT(SelectVT, NVPTX::LDU_GLOBAL_v4i8,
- NVPTX::LDU_GLOBAL_v4i16, NVPTX::LDU_GLOBAL_v4i32,
- {/* no v4i64 */});
+ Opcode = pickOpcodeForVT(TargetVT, NVPTX::LDU_GLOBAL_v4i16,
+ NVPTX::LDU_GLOBAL_v4i32, {/* no v4i64 */});
break;
}
if (!Opcode)
return false;
- SDLoc DL(N);
SDNode *NVPTXLDU = CurDAG->getMachineNode(*Opcode, DL, LD->getVTList(), Ops);
ReplaceNode(LD, NVPTXLDU);
@@ -1362,8 +1352,8 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
Chain};
const std::optional<unsigned> Opcode =
- pickOpcodeForVT(Value.getSimpleValueType().SimpleTy, NVPTX::ST_i8,
- NVPTX::ST_i16, NVPTX::ST_i32, NVPTX::ST_i64);
+ pickOpcodeForVT(Value.getSimpleValueType().SimpleTy, NVPTX::ST_i16,
+ NVPTX::ST_i32, NVPTX::ST_i64);
if (!Opcode)
return false;
@@ -1423,16 +1413,16 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
default:
return false;
case NVPTXISD::StoreV2:
- Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i8_v2, NVPTX::STV_i16_v2,
- NVPTX::STV_i32_v2, NVPTX::STV_i64_v2);
+ Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i16_v2, NVPTX::STV_i32_v2,
+ NVPTX::STV_i64_v2);
break;
case NVPTXISD::StoreV4:
- Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i8_v4, NVPTX::STV_i16_v4,
- NVPTX::STV_i32_v4, NVPTX::STV_i64_v4);
+ Opcode = pickOpcodeForVT(EltVT, NVPTX::STV_i16_v4, NVPTX::STV_i32_v4,
+ NVPTX::STV_i64_v4);
break;
case NVPTXISD::StoreV8:
- Opcode = pickOpcodeForVT(EltVT, {/* no v8i8 */}, {/* no v8i16 */},
- NVPTX::STV_i32_v8, {/* no v8i64 */});
+ Opcode = pickOpcodeForVT(EltVT, {/* no v8i16 */}, NVPTX::STV_i32_v8,
+ {/* no v8i64 */});
break;
}
@@ -1687,10 +1677,11 @@ bool NVPTXDAGToDAGISel::tryBF16ArithToFMA(SDNode *N) {
auto API = APF.bitcastToAPInt();
API = API.concat(API);
auto Const = CurDAG->getTargetConstant(API, DL, MVT::i32);
- return SDValue(CurDAG->getMachineNode(NVPTX::IMOV32i, DL, VT, Const), 0);
+ return SDValue(CurDAG->getMachineNode(NVPTX::MOV_B32_i, DL, VT, Const),
+ 0);
}
auto Const = CurDAG->getTargetConstantFP(APF, DL, VT);
- return SDValue(CurDAG->getMachineNode(NVPTX::BFMOV16i, DL, VT, Const), 0);
+ return SDValue(CurDAG->getMachineNode(NVPTX::MOV_BF16_i, DL, VT, Const), 0);
};
switch (N->getOpcode()) {
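
With the i8 cases removed, pickOpcodeForVT dispatches over just three operand widths. A reduced sketch of the shape of this helper follows, using a plain enum in place of the generated NVPTX opcodes and MVT::SimpleValueType; the real function covers more vector types than shown here.

#include <cassert>
#include <optional>

enum SimpleVT { i16, f16, bf16, i32, f32, i64, f64, v2i8 };

// Pick the opcode variant matching the value type's width; types with no
// variant (here v2i8) yield std::nullopt so the caller can bail out.
std::optional<unsigned> pickOpcodeForVT(SimpleVT vt,
                                        std::optional<unsigned> op16,
                                        std::optional<unsigned> op32,
                                        std::optional<unsigned> op64) {
  switch (vt) {
  case i16: case f16: case bf16:
    return op16;
  case i32: case f32:
    return op32;
  case i64: case f64:
    return op64;
  default:
    return std::nullopt;
  }
}

int main() {
  assert(pickOpcodeForVT(i32, 1, 2, 3) == 2);
  assert(!pickOpcodeForVT(v2i8, 1, 2, 3));
  // A "no such variant" slot is expressed as an empty optional argument:
  assert(!pickOpcodeForVT(i64, 1, 2, std::nullopt));
}
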
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 4fd3623..65d1be3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -4917,7 +4917,6 @@ combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
return SDValue();
auto *LD = cast<MemSDNode>(N);
- EVT MemVT = LD->getMemoryVT();
SDLoc DL(LD);
// the new opcode after we double the number of operands
@@ -4958,9 +4957,9 @@ combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
// Create the new load
- SDValue NewLoad =
- DCI.DAG.getMemIntrinsicNode(Opcode, DL, DCI.DAG.getVTList(NewVTs),
- Operands, MemVT, LD->getMemOperand());
+ SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
+ Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
+ LD->getMemOperand());
// Now we use a combination of BUILD_VECTORs and a MERGE_VALUES node to keep
// the outputs the same. These nodes will be optimized away in later
@@ -5002,7 +5001,6 @@ static SDValue combinePackingMovIntoStore(SDNode *N,
return SDValue();
auto *ST = cast<MemSDNode>(N);
- EVT MemVT = ElementVT.getVectorElementType();
// The new opcode after we double the number of operands.
NVPTXISD::NodeType Opcode;
@@ -5011,11 +5009,9 @@ static SDValue combinePackingMovIntoStore(SDNode *N,
// Any packed type is legal, so the legalizer will not have lowered
// ISD::STORE -> NVPTXISD::Store (unless it's under-aligned). We have to do
// it here.
- MemVT = ST->getMemoryVT();
Opcode = NVPTXISD::StoreV2;
break;
case NVPTXISD::StoreV2:
- MemVT = ST->getMemoryVT();
Opcode = NVPTXISD::StoreV4;
break;
case NVPTXISD::StoreV4:
@@ -5066,7 +5062,7 @@ static SDValue combinePackingMovIntoStore(SDNode *N,
// Now we replace the store
return DCI.DAG.getMemIntrinsicNode(Opcode, SDLoc(N), N->getVTList(), Operands,
- MemVT, ST->getMemOperand());
+ ST->getMemoryVT(), ST->getMemOperand());
}
static SDValue PerformStoreCombine(SDNode *N,
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td b/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
index 86dcb4a..719be03 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrFormats.td
@@ -11,15 +11,9 @@
//
//===----------------------------------------------------------------------===//
-// Vector instruction type enum
-class VecInstTypeEnum<bits<4> val> {
- bits<4> Value=val;
-}
-def VecNOP : VecInstTypeEnum<0>;
-
// Generic NVPTX Format
-class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
+class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern = []>
: Instruction {
field bits<14> Inst;
@@ -30,7 +24,6 @@ class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
let Pattern = pattern;
// TSFlagFields
- bits<4> VecInstType = VecNOP.Value;
bit IsLoad = false;
bit IsStore = false;
@@ -45,7 +38,6 @@ class NVPTXInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// 2**(2-1) = 2.
bits<2> IsSuld = 0;
- let TSFlags{3...0} = VecInstType;
let TSFlags{4} = IsLoad;
let TSFlags{5} = IsStore;
let TSFlags{6} = IsTex;
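
An aside on the TSFlags change above: with the 4-bit VecInstType field gone, bits 3...0 are simply left unused and the remaining flags keep their positions. A minimal sketch of how such flags are typically read back on the C++ side (the helper names here are hypothetical, assuming the bit layout shown above):

    #include "llvm/MC/MCInstrDesc.h"

    // Assumes the TSFlags layout from NVPTXInstrFormats.td above.
    static bool isNVPTXLoad(const llvm::MCInstrDesc &D) {
      return (D.TSFlags >> 4) & 1; // let TSFlags{4} = IsLoad
    }
    static bool isNVPTXStore(const llvm::MCInstrDesc &D) {
      return (D.TSFlags >> 5) & 1; // let TSFlags{5} = IsStore
    }
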
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index e218ef1..34fe467 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -35,23 +35,23 @@ void NVPTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
- if (RegInfo.getRegSizeInBits(*DestRC) != RegInfo.getRegSizeInBits(*SrcRC))
+ if (DestRC != SrcRC)
report_fatal_error("Copy one register into another with a different width");
unsigned Op;
- if (DestRC == &NVPTX::B1RegClass) {
- Op = NVPTX::IMOV1r;
- } else if (DestRC == &NVPTX::B16RegClass) {
- Op = NVPTX::MOV16r;
- } else if (DestRC == &NVPTX::B32RegClass) {
- Op = NVPTX::IMOV32r;
- } else if (DestRC == &NVPTX::B64RegClass) {
- Op = NVPTX::IMOV64r;
- } else if (DestRC == &NVPTX::B128RegClass) {
- Op = NVPTX::IMOV128r;
- } else {
+ if (DestRC == &NVPTX::B1RegClass)
+ Op = NVPTX::MOV_B1_r;
+ else if (DestRC == &NVPTX::B16RegClass)
+ Op = NVPTX::MOV_B16_r;
+ else if (DestRC == &NVPTX::B32RegClass)
+ Op = NVPTX::MOV_B32_r;
+ else if (DestRC == &NVPTX::B64RegClass)
+ Op = NVPTX::MOV_B64_r;
+ else if (DestRC == &NVPTX::B128RegClass)
+ Op = NVPTX::MOV_B128_r;
+ else
llvm_unreachable("Bad register copy");
- }
+
BuildMI(MBB, I, DL, get(Op), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 41bfe7e..d8047d3 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -15,19 +15,8 @@ include "NVPTXInstrFormats.td"
let OperandType = "OPERAND_IMMEDIATE" in {
def f16imm : Operand<f16>;
def bf16imm : Operand<bf16>;
-
}
-// List of vector specific properties
-def isVecLD : VecInstTypeEnum<1>;
-def isVecST : VecInstTypeEnum<2>;
-def isVecBuild : VecInstTypeEnum<3>;
-def isVecShuffle : VecInstTypeEnum<4>;
-def isVecExtract : VecInstTypeEnum<5>;
-def isVecInsert : VecInstTypeEnum<6>;
-def isVecDest : VecInstTypeEnum<7>;
-def isVecOther : VecInstTypeEnum<15>;
-
//===----------------------------------------------------------------------===//
// NVPTX Operand Definitions.
//===----------------------------------------------------------------------===//
@@ -484,46 +473,28 @@ let hasSideEffects = false in {
// takes a CvtMode immediate that defines the conversion mode to use. It can
// be CvtNONE to omit a conversion mode.
multiclass CVT_FROM_ALL<string ToType, RegisterClass RC, list<Predicate> Preds = []> {
- def _s8 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s8">,
- Requires<Preds>;
- def _u8 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u8">,
- Requires<Preds>;
- def _s16 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s16">,
- Requires<Preds>;
- def _u16 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B16:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u16">,
- Requires<Preds>;
- def _s32 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B32:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s32">,
- Requires<Preds>;
- def _u32 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B32:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u32">,
- Requires<Preds>;
- def _s64 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B64:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".s64">,
- Requires<Preds>;
- def _u64 :
- BasicFlagsNVPTXInst<(outs RC:$dst),
- (ins B64:$src), (ins CvtMode:$mode),
- "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # ".u64">,
- Requires<Preds>;
+ foreach sign = ["s", "u"] in {
+ def _ # sign # "8" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B16:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "8">,
+ Requires<Preds>;
+ def _ # sign # "16" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B16:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "16">,
+ Requires<Preds>;
+ def _ # sign # "32" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B32:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "32">,
+ Requires<Preds>;
+ def _ # sign # "64" :
+ BasicFlagsNVPTXInst<(outs RC:$dst),
+ (ins B64:$src), (ins CvtMode:$mode),
+ "cvt${mode:base}${mode:ftz}${mode:sat}." # ToType # "." # sign # "64">,
+ Requires<Preds>;
+ }
def _f16 :
BasicFlagsNVPTXInst<(outs RC:$dst),
(ins B16:$src), (ins CvtMode:$mode),
@@ -554,14 +525,12 @@ let hasSideEffects = false in {
}
// Generate cvts from all types to all types.
- defm CVT_s8 : CVT_FROM_ALL<"s8", B16>;
- defm CVT_u8 : CVT_FROM_ALL<"u8", B16>;
- defm CVT_s16 : CVT_FROM_ALL<"s16", B16>;
- defm CVT_u16 : CVT_FROM_ALL<"u16", B16>;
- defm CVT_s32 : CVT_FROM_ALL<"s32", B32>;
- defm CVT_u32 : CVT_FROM_ALL<"u32", B32>;
- defm CVT_s64 : CVT_FROM_ALL<"s64", B64>;
- defm CVT_u64 : CVT_FROM_ALL<"u64", B64>;
+ foreach sign = ["s", "u"] in {
+ defm CVT_ # sign # "8" : CVT_FROM_ALL<sign # "8", B16>;
+ defm CVT_ # sign # "16" : CVT_FROM_ALL<sign # "16", B16>;
+ defm CVT_ # sign # "32" : CVT_FROM_ALL<sign # "32", B32>;
+ defm CVT_ # sign # "64" : CVT_FROM_ALL<sign # "64", B64>;
+ }
defm CVT_f16 : CVT_FROM_ALL<"f16", B16>;
defm CVT_bf16 : CVT_FROM_ALL<"bf16", B16, [hasPTX<78>, hasSM<90>]>;
defm CVT_f32 : CVT_FROM_ALL<"f32", B32>;
@@ -569,18 +538,12 @@ let hasSideEffects = false in {
// These cvts are different from those above: The source and dest registers
// are of the same type.
- def CVT_INREG_s16_s8 : BasicNVPTXInst<(outs B16:$dst), (ins B16:$src),
- "cvt.s16.s8">;
- def CVT_INREG_s32_s8 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src),
- "cvt.s32.s8">;
- def CVT_INREG_s32_s16 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src),
- "cvt.s32.s16">;
- def CVT_INREG_s64_s8 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "cvt.s64.s8">;
- def CVT_INREG_s64_s16 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "cvt.s64.s16">;
- def CVT_INREG_s64_s32 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "cvt.s64.s32">;
+ def CVT_INREG_s16_s8 : BasicNVPTXInst<(outs B16:$dst), (ins B16:$src), "cvt.s16.s8">;
+ def CVT_INREG_s32_s8 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src), "cvt.s32.s8">;
+ def CVT_INREG_s32_s16 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src), "cvt.s32.s16">;
+ def CVT_INREG_s64_s8 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src), "cvt.s64.s8">;
+ def CVT_INREG_s64_s16 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src), "cvt.s64.s16">;
+ def CVT_INREG_s64_s32 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src), "cvt.s64.s32">;
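
For reference, the CVT_INREG family sign-extends a narrow value held in a register of the same width as the result, rather than converting between register widths. A hedged C++ model of cvt.s16.s8 (a sketch of the PTX semantics, not code from the patch):

    #include <cstdint>

    // cvt.s16.s8: sign-extend the low byte of a 16-bit register in place.
    int16_t cvt_inreg_s16_s8(int16_t x) {
      return int16_t(int8_t(x & 0xFF)); // keep low 8 bits, then sign-extend
    }
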
multiclass CVT_FROM_FLOAT_V2_SM80<string FromName, RegisterClass RC> {
def _f32 :
@@ -782,7 +745,7 @@ defm SUB : I3<"sub.s", sub, commutative = false>;
def ADD16x2 : I16x2<"add.s", add>;
-// in32 and int64 addition and subtraction with carry-out.
+// int32 and int64 addition and subtraction with carry-out.
defm ADDCC : ADD_SUB_INT_CARRY<"add.cc", addc, commutative = true>;
defm SUBCC : ADD_SUB_INT_CARRY<"sub.cc", subc, commutative = false>;
@@ -803,17 +766,17 @@ defm UDIV : I3<"div.u", udiv, commutative = false>;
defm SREM : I3<"rem.s", srem, commutative = false>;
defm UREM : I3<"rem.u", urem, commutative = false>;
-// Integer absolute value. NumBits should be one minus the bit width of RC.
-// This idiom implements the algorithm at
-// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.
-multiclass ABS<ValueType T, RegisterClass RC, string SizeName> {
- def : BasicNVPTXInst<(outs RC:$dst), (ins RC:$a),
- "abs" # SizeName,
- [(set T:$dst, (abs T:$a))]>;
+foreach t = [I16RT, I32RT, I64RT] in {
+ def ABS_S # t.Size :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a),
+ "abs.s" # t.Size,
+ [(set t.Ty:$dst, (abs t.Ty:$a))]>;
+
+ def NEG_S # t.Size :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src),
+ "neg.s" # t.Size,
+ [(set t.Ty:$dst, (ineg t.Ty:$src))]>;
}
-defm ABS_16 : ABS<i16, B16, ".s16">;
-defm ABS_32 : ABS<i32, B32, ".s32">;
-defm ABS_64 : ABS<i64, B64, ".s64">;
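
The removed comment pointed at the classic branchless integer-abs idiom; the new foreach simply emits PTX's abs.sN, which computes the same result in a single instruction. For the curious, a C++ sketch of the idiom the old comment referenced:

    #include <cstdint>

    // Branchless abs (graphics.stanford.edu/~seander/bithacks.html#IntegerAbs):
    // mask is all ones when v is negative, all zeros otherwise.
    int32_t abs_bithack(int32_t v) {
      int32_t mask = v >> 31;
      return (v + mask) ^ mask; // equivalently (v ^ mask) - mask
    }
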
// Integer min/max.
defm SMAX : I3<"max.s", smax, commutative = true>;
@@ -830,116 +793,63 @@ def UMIN16x2 : I16x2<"min.u", umin>;
//
// Wide multiplication
//
-def MULWIDES64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, B32:$b), "mul.wide.s32">;
-def MULWIDES64Imm :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, i32imm:$b), "mul.wide.s32">;
-
-def MULWIDEU64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, B32:$b), "mul.wide.u32">;
-def MULWIDEU64Imm :
- BasicNVPTXInst<(outs B64:$dst), (ins B32:$a, i32imm:$b), "mul.wide.u32">;
-
-def MULWIDES32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, B16:$b), "mul.wide.s16">;
-def MULWIDES32Imm :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, i16imm:$b), "mul.wide.s16">;
-
-def MULWIDEU32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, B16:$b), "mul.wide.u16">;
-def MULWIDEU32Imm :
- BasicNVPTXInst<(outs B32:$dst), (ins B16:$a, i16imm:$b), "mul.wide.u16">;
def SDTMulWide : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 2>]>;
-def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide, [SDNPCommutative]>;
-def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide, [SDNPCommutative]>;
+def smul_wide : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide, [SDNPCommutative]>;
+def umul_wide : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide, [SDNPCommutative]>;
-// Matchers for signed, unsigned mul.wide ISD nodes.
-let Predicates = [hasOptEnabled] in {
- def : Pat<(i32 (mul_wide_signed i16:$a, i16:$b)), (MULWIDES32 $a, $b)>;
- def : Pat<(i32 (mul_wide_signed i16:$a, imm:$b)), (MULWIDES32Imm $a, imm:$b)>;
- def : Pat<(i32 (mul_wide_unsigned i16:$a, i16:$b)), (MULWIDEU32 $a, $b)>;
- def : Pat<(i32 (mul_wide_unsigned i16:$a, imm:$b)), (MULWIDEU32Imm $a, imm:$b)>;
- def : Pat<(i64 (mul_wide_signed i32:$a, i32:$b)), (MULWIDES64 $a, $b)>;
- def : Pat<(i64 (mul_wide_signed i32:$a, imm:$b)), (MULWIDES64Imm $a, imm:$b)>;
- def : Pat<(i64 (mul_wide_unsigned i32:$a, i32:$b)), (MULWIDEU64 $a, $b)>;
- def : Pat<(i64 (mul_wide_unsigned i32:$a, imm:$b)), (MULWIDEU64Imm $a, imm:$b)>;
+multiclass MULWIDEInst<string suffix, SDPatternOperator op, RegTyInfo big_t, RegTyInfo small_t> {
+ def suffix # _rr :
+ BasicNVPTXInst<(outs big_t.RC:$dst), (ins small_t.RC:$a, small_t.RC:$b),
+ "mul.wide." # suffix,
+ [(set big_t.Ty:$dst, (op small_t.Ty:$a, small_t.Ty:$b))]>;
+ def suffix # _ri :
+ BasicNVPTXInst<(outs big_t.RC:$dst), (ins small_t.RC:$a, small_t.Imm:$b),
+ "mul.wide." # suffix,
+ [(set big_t.Ty:$dst, (op small_t.Ty:$a, imm:$b))]>;
}
+defm MUL_WIDE : MULWIDEInst<"s32", smul_wide, I64RT, I32RT>;
+defm MUL_WIDE : MULWIDEInst<"u32", umul_wide, I64RT, I32RT>;
+defm MUL_WIDE : MULWIDEInst<"s16", smul_wide, I32RT, I16RT>;
+defm MUL_WIDE : MULWIDEInst<"u16", umul_wide, I32RT, I16RT>;
+
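What the MUL_WIDE records model, for concreteness: mul.wide produces the full double-width product of its narrow sources, i.e. extend first, then multiply. A small C++ sketch of the semantics (illustration only, not code from the patch):

    #include <cstdint>

    int32_t mul_wide_s16(int16_t a, int16_t b) {
      return int32_t(a) * int32_t(b); // sign-extend, full 32-bit product
    }
    uint64_t mul_wide_u32(uint32_t a, uint32_t b) {
      return uint64_t(a) * uint64_t(b); // zero-extend, full 64-bit product
    }
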
//
// Integer multiply-add
//
-def mul_oneuse : OneUse2<mul>;
-
-multiclass MAD<string Ptx, ValueType VT, NVPTXRegClass Reg, Operand Imm> {
- def rrr:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Reg:$b, Reg:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, VT:$b), VT:$c))]>;
-
- def rir:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Imm:$b, Reg:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, imm:$b), VT:$c))]>;
- def rri:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Reg:$b, Imm:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, VT:$b), imm:$c))]>;
- def rii:
- BasicNVPTXInst<(outs Reg:$dst),
- (ins Reg:$a, Imm:$b, Imm:$c),
- Ptx,
- [(set VT:$dst, (add (mul_oneuse VT:$a, imm:$b), imm:$c))]>;
-}
-
-let Predicates = [hasOptEnabled] in {
-defm MAD16 : MAD<"mad.lo.s16", i16, B16, i16imm>;
-defm MAD32 : MAD<"mad.lo.s32", i32, B32, i32imm>;
-defm MAD64 : MAD<"mad.lo.s64", i64, B64, i64imm>;
-}
-
-multiclass MAD_WIDE<string PtxSuffix, OneUse2 Op, RegTyInfo BigT, RegTyInfo SmallT> {
+multiclass MADInst<string suffix, SDPatternOperator op, RegTyInfo big_t, RegTyInfo small_t> {
def rrr:
- BasicNVPTXInst<(outs BigT.RC:$dst),
- (ins SmallT.RC:$a, SmallT.RC:$b, BigT.RC:$c),
- "mad.wide." # PtxSuffix,
- [(set BigT.Ty:$dst, (add (Op SmallT.Ty:$a, SmallT.Ty:$b), BigT.Ty:$c))]>;
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.RC:$b, big_t.RC:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, small_t.Ty:$b), big_t.Ty:$c))]>;
def rri:
- BasicNVPTXInst<(outs BigT.RC:$dst),
- (ins SmallT.RC:$a, SmallT.RC:$b, BigT.Imm:$c),
- "mad.wide." # PtxSuffix,
- [(set BigT.Ty:$dst, (add (Op SmallT.Ty:$a, SmallT.Ty:$b), imm:$c))]>;
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.RC:$b, big_t.Imm:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, small_t.Ty:$b), imm:$c))]>;
def rir:
- BasicNVPTXInst<(outs BigT.RC:$dst),
- (ins SmallT.RC:$a, SmallT.Imm:$b, BigT.RC:$c),
- "mad.wide." # PtxSuffix,
- [(set BigT.Ty:$dst, (add (Op SmallT.Ty:$a, imm:$b), BigT.Ty:$c))]>;
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.Imm:$b, big_t.RC:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, imm:$b), big_t.Ty:$c))]>;
def rii:
- BasicNVPTXInst<(outs BigT.RC:$dst),
- (ins SmallT.RC:$a, SmallT.Imm:$b, BigT.Imm:$c),
- "mad.wide." # PtxSuffix,
- [(set BigT.Ty:$dst, (add (Op SmallT.Ty:$a, imm:$b), imm:$c))]>;
+ BasicNVPTXInst<(outs big_t.RC:$dst),
+ (ins small_t.RC:$a, small_t.Imm:$b, big_t.Imm:$c),
+ "mad." # suffix,
+ [(set big_t.Ty:$dst, (add (OneUse2<op> small_t.Ty:$a, imm:$b), imm:$c))]>;
}
-def mul_wide_unsigned_oneuse : OneUse2<mul_wide_unsigned>;
-def mul_wide_signed_oneuse : OneUse2<mul_wide_signed>;
-
let Predicates = [hasOptEnabled] in {
-defm MAD_WIDE_U16 : MAD_WIDE<"u16", mul_wide_unsigned_oneuse, I32RT, I16RT>;
-defm MAD_WIDE_S16 : MAD_WIDE<"s16", mul_wide_signed_oneuse, I32RT, I16RT>;
-defm MAD_WIDE_U32 : MAD_WIDE<"u32", mul_wide_unsigned_oneuse, I64RT, I32RT>;
-defm MAD_WIDE_S32 : MAD_WIDE<"s32", mul_wide_signed_oneuse, I64RT, I32RT>;
-}
+ defm MAD_LO_S16 : MADInst<"lo.s16", mul, I16RT, I16RT>;
+ defm MAD_LO_S32 : MADInst<"lo.s32", mul, I32RT, I32RT>;
+ defm MAD_LO_S64 : MADInst<"lo.s64", mul, I64RT, I64RT>;
-foreach t = [I16RT, I32RT, I64RT] in {
- def NEG_S # t.Size :
- BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src),
- "neg.s" # t.Size,
- [(set t.Ty:$dst, (ineg t.Ty:$src))]>;
+ defm MAD_WIDE_U16 : MADInst<"wide.u16", umul_wide, I32RT, I16RT>;
+ defm MAD_WIDE_S16 : MADInst<"wide.s16", smul_wide, I32RT, I16RT>;
+ defm MAD_WIDE_U32 : MADInst<"wide.u32", umul_wide, I64RT, I32RT>;
+ defm MAD_WIDE_S32 : MADInst<"wide.s32", smul_wide, I64RT, I32RT>;
}
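
The OneUse2 wrapper in these patterns guards the fold of (add (mul a, b), c) into a single mad: if the multiply has other users it must be materialized anyway, and folding would just compute the product twice. A hedged sketch of the equivalent C++-side check (hypothetical helper; OneUse2 expresses this declaratively in TableGen):

    #include "llvm/CodeGen/SelectionDAGNodes.h"

    // Fold mul into mad only when the product has no other users.
    static bool shouldFoldMulIntoMad(const llvm::SDNode *Mul) {
      return Mul->hasOneUse();
    }
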
//-----------------------------------
@@ -1050,8 +960,7 @@ def fdiv_approx : PatFrag<(ops node:$a, node:$b),
def FRCP32_approx_r :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$b), (ins FTZFlag:$ftz),
"rcp.approx$ftz.f32",
[(set f32:$dst, (fdiv_approx f32imm_1, f32:$b))]>;
@@ -1060,14 +969,12 @@ def FRCP32_approx_r :
//
def FDIV32_approx_rr :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, B32:$b), (ins FTZFlag:$ftz),
"div.approx$ftz.f32",
[(set f32:$dst, (fdiv_approx f32:$a, f32:$b))]>;
def FDIV32_approx_ri :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, f32imm:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, f32imm:$b), (ins FTZFlag:$ftz),
"div.approx$ftz.f32",
[(set f32:$dst, (fdiv_approx f32:$a, fpimm:$b))]>;
//
@@ -1090,14 +997,12 @@ def : Pat<(fdiv_full f32imm_1, f32:$b),
//
def FDIV32rr :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, B32:$b), (ins FTZFlag:$ftz),
"div.full$ftz.f32",
[(set f32:$dst, (fdiv_full f32:$a, f32:$b))]>;
def FDIV32ri :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, f32imm:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, f32imm:$b), (ins FTZFlag:$ftz),
"div.full$ftz.f32",
[(set f32:$dst, (fdiv_full f32:$a, fpimm:$b))]>;
//
@@ -1111,8 +1016,7 @@ def fdiv_ftz : PatFrag<(ops node:$a, node:$b),
def FRCP32r_prec :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$b), (ins FTZFlag:$ftz),
"rcp.rn$ftz.f32",
[(set f32:$dst, (fdiv_ftz f32imm_1, f32:$b))]>;
//
@@ -1120,14 +1024,12 @@ def FRCP32r_prec :
//
def FDIV32rr_prec :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, B32:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, B32:$b), (ins FTZFlag:$ftz),
"div.rn$ftz.f32",
[(set f32:$dst, (fdiv_ftz f32:$a, f32:$b))]>;
def FDIV32ri_prec :
BasicFlagsNVPTXInst<(outs B32:$dst),
- (ins B32:$a, f32imm:$b),
- (ins FTZFlag:$ftz),
+ (ins B32:$a, f32imm:$b), (ins FTZFlag:$ftz),
"div.rn$ftz.f32",
[(set f32:$dst, (fdiv_ftz f32:$a, fpimm:$b))]>;
@@ -1206,10 +1108,8 @@ def TANH_APPROX_f32 :
// Template for three-arg bitwise operations. Takes three args, Creates .b16,
// .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of OpcStr.
multiclass BITWISE<string OpcStr, SDNode OpNode> {
- defm b1 : I3Inst<OpcStr # ".pred", OpNode, I1RT, commutative = true>;
- defm b16 : I3Inst<OpcStr # ".b16", OpNode, I16RT, commutative = true>;
- defm b32 : I3Inst<OpcStr # ".b32", OpNode, I32RT, commutative = true>;
- defm b64 : I3Inst<OpcStr # ".b64", OpNode, I64RT, commutative = true>;
+ foreach t = [I1RT, I16RT, I32RT, I64RT] in
+ defm _ # t.PtxType : I3Inst<OpcStr # "." # t.PtxType, OpNode, t, commutative = true>;
}
defm OR : BITWISE<"or", or>;
@@ -1217,48 +1117,40 @@ defm AND : BITWISE<"and", and>;
defm XOR : BITWISE<"xor", xor>;
// PTX does not support mul on predicates, convert to and instructions
-def : Pat<(mul i1:$a, i1:$b), (ANDb1rr $a, $b)>;
-def : Pat<(mul i1:$a, imm:$b), (ANDb1ri $a, imm:$b)>;
+def : Pat<(mul i1:$a, i1:$b), (AND_predrr $a, $b)>;
+def : Pat<(mul i1:$a, imm:$b), (AND_predri $a, imm:$b)>;
foreach op = [add, sub] in {
- def : Pat<(op i1:$a, i1:$b), (XORb1rr $a, $b)>;
- def : Pat<(op i1:$a, imm:$b), (XORb1ri $a, imm:$b)>;
+ def : Pat<(op i1:$a, i1:$b), (XOR_predrr $a, $b)>;
+ def : Pat<(op i1:$a, imm:$b), (XOR_predri $a, imm:$b)>;
}
// These transformations were once reliably performed by instcombine, but thanks
// to poison semantics they are no longer safe for LLVM IR, perform them here
// instead.
-def : Pat<(select i1:$a, i1:$b, 0), (ANDb1rr $a, $b)>;
-def : Pat<(select i1:$a, 1, i1:$b), (ORb1rr $a, $b)>;
+def : Pat<(select i1:$a, i1:$b, 0), (AND_predrr $a, $b)>;
+def : Pat<(select i1:$a, 1, i1:$b), (OR_predrr $a, $b)>;
// Lower logical v2i16/v4i8 ops as bitwise ops on b32.
foreach vt = [v2i16, v4i8] in {
- def : Pat<(or vt:$a, vt:$b), (ORb32rr $a, $b)>;
- def : Pat<(xor vt:$a, vt:$b), (XORb32rr $a, $b)>;
- def : Pat<(and vt:$a, vt:$b), (ANDb32rr $a, $b)>;
+ def : Pat<(or vt:$a, vt:$b), (OR_b32rr $a, $b)>;
+ def : Pat<(xor vt:$a, vt:$b), (XOR_b32rr $a, $b)>;
+ def : Pat<(and vt:$a, vt:$b), (AND_b32rr $a, $b)>;
// The constants get legalized into a bitcast from i32, so that's what we need
// to match here.
def: Pat<(or vt:$a, (vt (bitconvert (i32 imm:$b)))),
- (ORb32ri $a, imm:$b)>;
+ (OR_b32ri $a, imm:$b)>;
def: Pat<(xor vt:$a, (vt (bitconvert (i32 imm:$b)))),
- (XORb32ri $a, imm:$b)>;
+ (XOR_b32ri $a, imm:$b)>;
def: Pat<(and vt:$a, (vt (bitconvert (i32 imm:$b)))),
- (ANDb32ri $a, imm:$b)>;
-}
-
-def NOT1 : BasicNVPTXInst<(outs B1:$dst), (ins B1:$src),
- "not.pred",
- [(set i1:$dst, (not i1:$src))]>;
-def NOT16 : BasicNVPTXInst<(outs B16:$dst), (ins B16:$src),
- "not.b16",
- [(set i16:$dst, (not i16:$src))]>;
-def NOT32 : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src),
- "not.b32",
- [(set i32:$dst, (not i32:$src))]>;
-def NOT64 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
- "not.b64",
- [(set i64:$dst, (not i64:$src))]>;
+ (AND_b32ri $a, imm:$b)>;
+}
+
+foreach t = [I1RT, I16RT, I32RT, I64RT] in
+ def NOT_ # t.PtxType : BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src),
+ "not." # t.PtxType,
+ [(set t.Ty:$dst, (not t.Ty:$src))]>;
// Template for left/right shifts. Takes three operands,
// [dest (reg), src (reg), shift (reg or imm)].
@@ -1266,34 +1158,22 @@ def NOT64 : BasicNVPTXInst<(outs B64:$dst), (ins B64:$src),
//
// This template also defines a 32-bit shift (imm, imm) instruction.
multiclass SHIFT<string OpcStr, SDNode OpNode> {
- def i64rr :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$a, B32:$b),
- OpcStr # "64",
- [(set i64:$dst, (OpNode i64:$a, i32:$b))]>;
- def i64ri :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$a, i32imm:$b),
- OpcStr # "64",
- [(set i64:$dst, (OpNode i64:$a, (i32 imm:$b)))]>;
- def i32rr :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$a, B32:$b),
- OpcStr # "32",
- [(set i32:$dst, (OpNode i32:$a, i32:$b))]>;
- def i32ri :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$a, i32imm:$b),
- OpcStr # "32",
- [(set i32:$dst, (OpNode i32:$a, (i32 imm:$b)))]>;
- def i32ii :
- BasicNVPTXInst<(outs B32:$dst), (ins i32imm:$a, i32imm:$b),
- OpcStr # "32",
- [(set i32:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
- def i16rr :
- BasicNVPTXInst<(outs B16:$dst), (ins B16:$a, B32:$b),
- OpcStr # "16",
- [(set i16:$dst, (OpNode i16:$a, i32:$b))]>;
- def i16ri :
- BasicNVPTXInst<(outs B16:$dst), (ins B16:$a, i32imm:$b),
- OpcStr # "16",
- [(set i16:$dst, (OpNode i16:$a, (i32 imm:$b)))]>;
+ let hasSideEffects = false in {
+ foreach t = [I64RT, I32RT, I16RT] in {
+ def t.Size # _rr :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a, B32:$b),
+ OpcStr # t.Size,
+ [(set t.Ty:$dst, (OpNode t.Ty:$a, i32:$b))]>;
+ def t.Size # _ri :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a, i32imm:$b),
+ OpcStr # t.Size,
+ [(set t.Ty:$dst, (OpNode t.Ty:$a, (i32 imm:$b)))]>;
+ def t.Size # _ii :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a, i32imm:$b),
+ OpcStr # t.Size,
+ [(set t.Ty:$dst, (OpNode (t.Ty imm:$a), (i32 imm:$b)))]>;
+ }
+ }
}
defm SHL : SHIFT<"shl.b", shl>;
@@ -1301,14 +1181,11 @@ defm SRA : SHIFT<"shr.s", sra>;
defm SRL : SHIFT<"shr.u", srl>;
// Bit-reverse
-def BREV32 :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$a),
- "brev.b32",
- [(set i32:$dst, (bitreverse i32:$a))]>;
-def BREV64 :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$a),
- "brev.b64",
- [(set i64:$dst, (bitreverse i64:$a))]>;
+foreach t = [I64RT, I32RT] in
+ def BREV_ # t.PtxType :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$a),
+ "brev." # t.PtxType,
+ [(set t.Ty:$dst, (bitreverse t.Ty:$a))]>;
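
For reference, brev.bN reverses the bit order of its operand. A C++ model of the 32-bit case (sketch of the PTX semantics only):

    #include <cstdint>

    uint32_t brev_b32(uint32_t x) {
      uint32_t r = 0;
      for (int i = 0; i < 32; ++i)
        r |= ((x >> i) & 1u) << (31 - i); // bit i moves to bit 31-i
      return r;
    }
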
//
@@ -1460,20 +1337,19 @@ def : Pat<(i16 (sext_inreg (trunc (prmt i32:$s, 0, byte_extract_prmt:$sel, PrmtN
// Byte extraction via shift/trunc/sext
-def : Pat<(i16 (sext_inreg (trunc i32:$s), i8)),
- (CVT_s8_s32 $s, CvtNONE)>;
-def : Pat<(i16 (sext_inreg (trunc (srl i32:$s, (i32 imm:$o))), i8)),
+def : Pat<(i16 (sext_inreg (trunc i32:$s), i8)), (CVT_s8_s32 $s, CvtNONE)>;
+def : Pat<(i16 (sext_inreg (trunc i64:$s), i8)), (CVT_s8_s64 $s, CvtNONE)>;
+
+def : Pat<(sext_inreg (srl i32:$s, (i32 imm:$o)), i8), (BFE_S32rii $s, imm:$o, 8)>;
+def : Pat<(sext_inreg (srl i64:$s, (i32 imm:$o)), i8), (BFE_S64rii $s, imm:$o, 8)>;
+
+def : Pat<(i16 (sext_inreg (trunc (srl i32:$s, (i32 imm:$o))), i8)),
(CVT_s8_s32 (BFE_S32rii $s, imm:$o, 8), CvtNONE)>;
-def : Pat<(sext_inreg (srl i32:$s, (i32 imm:$o)), i8),
- (BFE_S32rii $s, imm:$o, 8)>;
+def : Pat<(i16 (sext_inreg (trunc (srl i64:$s, (i32 imm:$o))), i8)),
+ (CVT_s8_s64 (BFE_S64rii $s, imm:$o, 8), CvtNONE)>;
+
def : Pat<(i16 (sra (i16 (trunc i32:$s)), (i32 8))),
(CVT_s8_s32 (BFE_S32rii $s, 8, 8), CvtNONE)>;
-def : Pat<(sext_inreg (srl i64:$s, (i32 imm:$o)), i8),
- (BFE_S64rii $s, imm:$o, 8)>;
-def : Pat<(i16 (sext_inreg (trunc i64:$s), i8)),
- (CVT_s8_s64 $s, CvtNONE)>;
-def : Pat<(i16 (sext_inreg (trunc (srl i64:$s, (i32 imm:$o))), i8)),
- (CVT_s8_s64 (BFE_S64rii $s, imm:$o, 8), CvtNONE)>;
//-----------------------------------
// Comparison instructions (setp, set)
@@ -1563,10 +1439,7 @@ def SETP_bf16x2rr :
def addr : ComplexPattern<pAny, 2, "SelectADDR">;
-def ADDR_base : Operand<pAny> {
- let PrintMethod = "printOperand";
-}
-
+def ADDR_base : Operand<pAny>;
def ADDR : Operand<pAny> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops ADDR_base, i32imm);
@@ -1580,10 +1453,6 @@ def MmaCode : Operand<i32> {
let PrintMethod = "printMmaCode";
}
-def Offseti32imm : Operand<i32> {
- let PrintMethod = "printOffseti32imm";
-}
-
// Get pointer to local stack.
let hasSideEffects = false in {
def MOV_DEPOT_ADDR : NVPTXInst<(outs B32:$d), (ins i32imm:$num),
@@ -1595,33 +1464,31 @@ let hasSideEffects = false in {
// copyPhysreg is hard-coded in NVPTXInstrInfo.cpp
let hasSideEffects = false, isAsCheapAsAMove = true in {
- // Class for register-to-register moves
- class MOVr<RegisterClass RC, string OpStr> :
- BasicNVPTXInst<(outs RC:$dst), (ins RC:$src),
- "mov." # OpStr>;
-
- // Class for immediate-to-register moves
- class MOVi<RegisterClass RC, string OpStr, ValueType VT, Operand IMMType, SDNode ImmNode> :
- BasicNVPTXInst<(outs RC:$dst), (ins IMMType:$src),
- "mov." # OpStr,
- [(set VT:$dst, ImmNode:$src)]>;
-}
+ let isMoveReg = true in
+ class MOVr<RegisterClass RC, string OpStr> :
+ BasicNVPTXInst<(outs RC:$dst), (ins RC:$src), "mov." # OpStr>;
-def IMOV1r : MOVr<B1, "pred">;
-def MOV16r : MOVr<B16, "b16">;
-def IMOV32r : MOVr<B32, "b32">;
-def IMOV64r : MOVr<B64, "b64">;
-def IMOV128r : MOVr<B128, "b128">;
+ let isMoveImm = true in
+ class MOVi<RegTyInfo t, string suffix> :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.Imm:$src),
+ "mov." # suffix,
+ [(set t.Ty:$dst, t.ImmNode:$src)]>;
+}
+def MOV_B1_r : MOVr<B1, "pred">;
+def MOV_B16_r : MOVr<B16, "b16">;
+def MOV_B32_r : MOVr<B32, "b32">;
+def MOV_B64_r : MOVr<B64, "b64">;
+def MOV_B128_r : MOVr<B128, "b128">;
-def IMOV1i : MOVi<B1, "pred", i1, i1imm, imm>;
-def IMOV16i : MOVi<B16, "b16", i16, i16imm, imm>;
-def IMOV32i : MOVi<B32, "b32", i32, i32imm, imm>;
-def IMOV64i : MOVi<B64, "b64", i64, i64imm, imm>;
-def FMOV16i : MOVi<B16, "b16", f16, f16imm, fpimm>;
-def BFMOV16i : MOVi<B16, "b16", bf16, bf16imm, fpimm>;
-def FMOV32i : MOVi<B32, "b32", f32, f32imm, fpimm>;
-def FMOV64i : MOVi<B64, "b64", f64, f64imm, fpimm>;
+def MOV_B1_i : MOVi<I1RT, "pred">;
+def MOV_B16_i : MOVi<I16RT, "b16">;
+def MOV_B32_i : MOVi<I32RT, "b32">;
+def MOV_B64_i : MOVi<I64RT, "b64">;
+def MOV_F16_i : MOVi<F16RT, "b16">;
+def MOV_BF16_i : MOVi<BF16RT, "b16">;
+def MOV_F32_i : MOVi<F32RT, "b32">;
+def MOV_F64_i : MOVi<F64RT, "b64">;
def to_tglobaladdr : SDNodeXForm<globaladdr, [{
@@ -1639,11 +1506,11 @@ def to_tframeindex : SDNodeXForm<frameindex, [{
return CurDAG->getTargetFrameIndex(N->getIndex(), N->getValueType(0));
}]>;
-def : Pat<(i32 globaladdr:$dst), (IMOV32i (to_tglobaladdr $dst))>;
-def : Pat<(i64 globaladdr:$dst), (IMOV64i (to_tglobaladdr $dst))>;
+def : Pat<(i32 globaladdr:$dst), (MOV_B32_i (to_tglobaladdr $dst))>;
+def : Pat<(i64 globaladdr:$dst), (MOV_B64_i (to_tglobaladdr $dst))>;
-def : Pat<(i32 externalsym:$dst), (IMOV32i (to_texternsym $dst))>;
-def : Pat<(i64 externalsym:$dst), (IMOV64i (to_texternsym $dst))>;
+def : Pat<(i32 externalsym:$dst), (MOV_B32_i (to_texternsym $dst))>;
+def : Pat<(i64 externalsym:$dst), (MOV_B64_i (to_texternsym $dst))>;
//---- Copy Frame Index ----
def LEA_ADDRi : NVPTXInst<(outs B32:$dst), (ins ADDR:$addr),
@@ -1657,45 +1524,34 @@ def : Pat<(i64 frameindex:$fi), (LEA_ADDRi64 (to_tframeindex $fi), 0)>;
//-----------------------------------
// Comparison and Selection
//-----------------------------------
+// TODO: These patterns seem very specific and brittle. We should try to find
+// a more general solution.
def cond_signed : PatLeaf<(cond), [{
return isSignedIntSetCC(N->get());
}]>;
-def cond_not_signed : PatLeaf<(cond), [{
- return !isSignedIntSetCC(N->get());
-}]>;
+// A 16-bit signed comparison of sign-extended byte extracts can be converted
+// to a 32-bit comparison if we change the PRMT to sign-extend the extracted
+// bytes.
+def : Pat<(setcc (i16 (sext_inreg (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE)), i8)),
+ (i16 (sext_inreg (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE)), i8)),
+ cond_signed:$cc),
+ (SETP_i32rr (PRMT_B32rii i32:$a, 0, (to_sign_extend_selector $sel_a), PrmtNONE),
+ (PRMT_B32rii i32:$b, 0, (to_sign_extend_selector $sel_b), PrmtNONE),
+ (cond2cc $cc))>;
+
+// A 16-bit comparison of truncated byte extracts can be converted to a 32-bit
+// comparison because we know that the truncate is just truncating off zeros
+// and that the most-significant byte is also zeros, so the meaning of signed
+// and unsigned comparisons will not be changed.
+def : Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
+ (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
+ cond:$cc),
+ (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
+ (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
+ (cond2cc $cc))>;
-// comparisons of i8 extracted with PRMT as i32
-// It's faster to do comparison directly on i32 extracted by PRMT,
-// instead of the long conversion and sign extending.
-def: Pat<(setcc (i16 (sext_inreg (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))), i8)),
- (i16 (sext_inreg (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))), i8)),
- cond_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
-
-def: Pat<(setcc (i16 (sext_inreg (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE)), i8)),
- (i16 (sext_inreg (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE)), i8)),
- cond_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
-
-def: Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
- (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
- cond_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
-
-def: Pat<(setcc (i16 (trunc (prmt i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE))),
- (i16 (trunc (prmt i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE))),
- cond_not_signed:$cc),
- (SETP_i32rr (PRMT_B32rii i32:$a, 0, byte_extract_prmt:$sel_a, PrmtNONE),
- (PRMT_B32rii i32:$b, 0, byte_extract_prmt:$sel_b, PrmtNONE),
- (cond2cc $cc))>;
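
These patterns lean on PTX prmt byte-select semantics: each nibble of the selector picks one byte of the {b, a} source pair, and setting bit 3 of a nibble replicates that byte's sign bit across the result byte, which is what to_sign_extend_selector exploits. A C++ model of the default prmt mode (a sketch under that reading of the PTX spec, not code from the patch):

    #include <cstdint>

    uint32_t prmt(uint32_t a, uint32_t b, uint16_t sel) {
      uint64_t src = (uint64_t(b) << 32) | a; // bytes 0-3 from a, 4-7 from b
      uint32_t result = 0;
      for (int i = 0; i < 4; ++i) {
        uint8_t nib = (sel >> (4 * i)) & 0xF;
        uint8_t byte = (src >> (8 * (nib & 0x7))) & 0xFF;
        if (nib & 0x8) // sign mode: replicate the byte's msb
          byte = (byte & 0x80) ? 0xFF : 0x00;
        result |= uint32_t(byte) << (8 * i);
      }
      return result;
    }
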
def SDTDeclareArrayParam :
SDTypeProfile<0, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i32>]>;
@@ -1774,6 +1630,18 @@ def : Pat<(declare_array_param externalsym:$a, imm:$align, imm:$size),
def : Pat<(declare_scalar_param externalsym:$a, imm:$size),
(DECLARE_PARAM_scalar (to_texternsym $a), imm:$size)>;
+// Call prototype wrapper: this is a dummy instruction that just prints its
+// operand, which is a string defining the call prototype.
+def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def CallPrototype :
+ SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def ProtoIdent : Operand<i32> { let PrintMethod = "printProtoIdent"; }
+def CALL_PROTOTYPE :
+ NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
+
+
foreach t = [I32RT, I64RT] in {
defvar inst_name = "MOV" # t.Size # "_PARAM";
def inst_name : BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src), "mov.b" # t.Size>;
@@ -1793,6 +1661,32 @@ defm ProxyRegB16 : ProxyRegInst<"b16", B16>;
defm ProxyRegB32 : ProxyRegInst<"b32", B32>;
defm ProxyRegB64 : ProxyRegInst<"b64", B64>;
+
+// Callseq start and end
+
+// Note: these nodes are marked as SDNPMayStore and SDNPMayLoad because
+// they define the scope in which the declared params may be used. Therefore
+// we add these flags to ensure ld.param and st.param are not sunk or hoisted
+// out of that scope.
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START",
+ SDCallSeqStart<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>,
+ [SDNPHasChain, SDNPOutGlue,
+ SDNPSideEffect, SDNPMayStore, SDNPMayLoad]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END",
+ SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPSideEffect, SDNPMayStore, SDNPMayLoad]>;
+
+def Callseq_Start :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\{ // callseq $amt1, $amt2",
+ [(callseq_start timm:$amt1, timm:$amt2)]>;
+def Callseq_End :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\} // callseq $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
//
// Load / Store Handling
//
@@ -1805,7 +1699,6 @@ class LD<NVPTXRegClass regclass>
"\t$dst, [$addr];", []>;
let mayLoad=1, hasSideEffects=0 in {
- def LD_i8 : LD<B16>;
def LD_i16 : LD<B16>;
def LD_i32 : LD<B32>;
def LD_i64 : LD<B64>;
@@ -1821,7 +1714,6 @@ class ST<DAGOperand O>
" \t[$addr], $src;", []>;
let mayStore=1, hasSideEffects=0 in {
- def ST_i8 : ST<RI16>;
def ST_i16 : ST<RI16>;
def ST_i32 : ST<RI32>;
def ST_i64 : ST<RI64>;
@@ -1854,7 +1746,6 @@ multiclass LD_VEC<NVPTXRegClass regclass, bit support_v8 = false> {
"[$addr];", []>;
}
let mayLoad=1, hasSideEffects=0 in {
- defm LDV_i8 : LD_VEC<B16>;
defm LDV_i16 : LD_VEC<B16>;
defm LDV_i32 : LD_VEC<B32, support_v8 = true>;
defm LDV_i64 : LD_VEC<B64>;
@@ -1888,7 +1779,6 @@ multiclass ST_VEC<DAGOperand O, bit support_v8 = false> {
}
let mayStore=1, hasSideEffects=0 in {
- defm STV_i8 : ST_VEC<RI16>;
defm STV_i16 : ST_VEC<RI16>;
defm STV_i32 : ST_VEC<RI32, support_v8 = true>;
defm STV_i64 : ST_VEC<RI64>;
@@ -2058,14 +1948,14 @@ def : Pat<(i64 (anyext i32:$a)), (CVT_u64_u32 $a, CvtNONE)>;
// truncate i64
def : Pat<(i32 (trunc i64:$a)), (CVT_u32_u64 $a, CvtNONE)>;
def : Pat<(i16 (trunc i64:$a)), (CVT_u16_u64 $a, CvtNONE)>;
-def : Pat<(i1 (trunc i64:$a)), (SETP_i64ri (ANDb64ri $a, 1), 0, CmpNE)>;
+def : Pat<(i1 (trunc i64:$a)), (SETP_i64ri (AND_b64ri $a, 1), 0, CmpNE)>;
// truncate i32
def : Pat<(i16 (trunc i32:$a)), (CVT_u16_u32 $a, CvtNONE)>;
-def : Pat<(i1 (trunc i32:$a)), (SETP_i32ri (ANDb32ri $a, 1), 0, CmpNE)>;
+def : Pat<(i1 (trunc i32:$a)), (SETP_i32ri (AND_b32ri $a, 1), 0, CmpNE)>;
// truncate i16
-def : Pat<(i1 (trunc i16:$a)), (SETP_i16ri (ANDb16ri $a, 1), 0, CmpNE)>;
+def : Pat<(i1 (trunc i16:$a)), (SETP_i16ri (AND_b16ri $a, 1), 0, CmpNE)>;
// sext_inreg
def : Pat<(sext_inreg i16:$a, i8), (CVT_INREG_s16_s8 $a)>;
@@ -2309,52 +2199,20 @@ defm : CVT_ROUND<frint, CvtRNI, CvtRNI_FTZ>;
//-----------------------------------
let isTerminator=1 in {
- let isReturn=1, isBarrier=1 in
+ let isReturn=1, isBarrier=1 in
def Return : BasicNVPTXInst<(outs), (ins), "ret", [(retglue)]>;
- let isBranch=1 in
- def CBranch : NVPTXInst<(outs), (ins B1:$a, brtarget:$target),
+ let isBranch=1 in {
+ def CBranch : NVPTXInst<(outs), (ins B1:$a, brtarget:$target),
"@$a bra \t$target;",
[(brcond i1:$a, bb:$target)]>;
- let isBranch=1 in
- def CBranchOther : NVPTXInst<(outs), (ins B1:$a, brtarget:$target),
- "@!$a bra \t$target;", []>;
- let isBranch=1, isBarrier=1 in
+ let isBarrier=1 in
def GOTO : BasicNVPTXInst<(outs), (ins brtarget:$target),
- "bra.uni", [(br bb:$target)]>;
+ "bra.uni", [(br bb:$target)]>;
+ }
}
-def : Pat<(brcond i32:$a, bb:$target),
- (CBranch (SETP_i32ri $a, 0, CmpNE), bb:$target)>;
-
-// SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
-// conditional branch if the target block is the next block so that the code
-// can fall through to the target block. The inversion is done by 'xor
-// condition, 1', which will be translated to (setne condition, -1). Since ptx
-// supports '@!pred bra target', we should use it.
-def : Pat<(brcond (i1 (setne i1:$a, -1)), bb:$target),
- (CBranchOther $a, bb:$target)>;
-
-// Call
-def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
- SDTCisVT<1, i32>]>;
-def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
-
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
- [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
- SDNPSideEffect]>;
-
-def Callseq_Start :
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\\{ // callseq $amt1, $amt2",
- [(callseq_start timm:$amt1, timm:$amt2)]>;
-def Callseq_End :
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
- "\\} // callseq $amt1",
- [(callseq_end timm:$amt1, timm:$amt2)]>;
// trap instruction
def trapinst : BasicNVPTXInst<(outs), (ins), "trap", [(trap)]>, Requires<[noPTXASUnreachableBug]>;
@@ -2364,18 +2222,6 @@ def trapexitinst : NVPTXInst<(outs), (ins), "trap; exit;", [(trap)]>, Requires<[
// brkpt instruction
def debugtrapinst : BasicNVPTXInst<(outs), (ins), "brkpt", [(debugtrap)]>;
-// Call prototype wrapper
-def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
-def CallPrototype :
- SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
-def ProtoIdent : Operand<i32> {
- let PrintMethod = "printProtoIdent";
-}
-def CALL_PROTOTYPE :
- NVPTXInst<(outs), (ins ProtoIdent:$ident),
- "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
-
def SDTDynAllocaOp :
SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisInt<1>, SDTCisVT<2, i32>]>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 0a00220..d337192 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -243,63 +243,82 @@ foreach sync = [false, true] in {
}
// vote.{all,any,uni,ballot}
-multiclass VOTE<NVPTXRegClass regclass, string mode, Intrinsic IntOp> {
- def : BasicNVPTXInst<(outs regclass:$dest), (ins B1:$pred),
- "vote." # mode,
- [(set regclass:$dest, (IntOp i1:$pred))]>,
- Requires<[hasPTX<60>, hasSM<30>]>;
-}
+let Predicates = [hasPTX<60>, hasSM<30>] in {
+ multiclass VOTE<string mode, RegTyInfo t, Intrinsic op> {
+ def : BasicNVPTXInst<(outs t.RC:$dest), (ins B1:$pred),
+ "vote." # mode # "." # t.PtxType,
+ [(set t.Ty:$dest, (op i1:$pred))]>;
+ }
-defm VOTE_ALL : VOTE<B1, "all.pred", int_nvvm_vote_all>;
-defm VOTE_ANY : VOTE<B1, "any.pred", int_nvvm_vote_any>;
-defm VOTE_UNI : VOTE<B1, "uni.pred", int_nvvm_vote_uni>;
-defm VOTE_BALLOT : VOTE<B32, "ballot.b32", int_nvvm_vote_ballot>;
+ defm VOTE_ALL : VOTE<"all", I1RT, int_nvvm_vote_all>;
+ defm VOTE_ANY : VOTE<"any", I1RT, int_nvvm_vote_any>;
+ defm VOTE_UNI : VOTE<"uni", I1RT, int_nvvm_vote_uni>;
+ defm VOTE_BALLOT : VOTE<"ballot", I32RT, int_nvvm_vote_ballot>;
+
+ // vote.sync.{all,any,uni,ballot}
+ multiclass VOTE_SYNC<string mode, RegTyInfo t, Intrinsic op> {
+ def i : BasicNVPTXInst<(outs t.RC:$dest), (ins B1:$pred, i32imm:$mask),
+ "vote.sync." # mode # "." # t.PtxType,
+ [(set t.Ty:$dest, (op imm:$mask, i1:$pred))]>;
+ def r : BasicNVPTXInst<(outs t.RC:$dest), (ins B1:$pred, B32:$mask),
+ "vote.sync." # mode # "." # t.PtxType,
+ [(set t.Ty:$dest, (op i32:$mask, i1:$pred))]>;
+ }
-// vote.sync.{all,any,uni,ballot}
-multiclass VOTE_SYNC<NVPTXRegClass regclass, string mode, Intrinsic IntOp> {
- def i : BasicNVPTXInst<(outs regclass:$dest), (ins B1:$pred, i32imm:$mask),
- "vote.sync." # mode,
- [(set regclass:$dest, (IntOp imm:$mask, i1:$pred))]>,
- Requires<[hasPTX<60>, hasSM<30>]>;
- def r : BasicNVPTXInst<(outs regclass:$dest), (ins B1:$pred, B32:$mask),
- "vote.sync." # mode,
- [(set regclass:$dest, (IntOp i32:$mask, i1:$pred))]>,
- Requires<[hasPTX<60>, hasSM<30>]>;
+ defm VOTE_SYNC_ALL : VOTE_SYNC<"all", I1RT, int_nvvm_vote_all_sync>;
+ defm VOTE_SYNC_ANY : VOTE_SYNC<"any", I1RT, int_nvvm_vote_any_sync>;
+ defm VOTE_SYNC_UNI : VOTE_SYNC<"uni", I1RT, int_nvvm_vote_uni_sync>;
+ defm VOTE_SYNC_BALLOT : VOTE_SYNC<"ballot", I32RT, int_nvvm_vote_ballot_sync>;
}
-
-defm VOTE_SYNC_ALL : VOTE_SYNC<B1, "all.pred", int_nvvm_vote_all_sync>;
-defm VOTE_SYNC_ANY : VOTE_SYNC<B1, "any.pred", int_nvvm_vote_any_sync>;
-defm VOTE_SYNC_UNI : VOTE_SYNC<B1, "uni.pred", int_nvvm_vote_uni_sync>;
-defm VOTE_SYNC_BALLOT : VOTE_SYNC<B32, "ballot.b32", int_nvvm_vote_ballot_sync>;
-
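
For context on the VOTE_SYNC records: vote.sync.ballot.b32 gathers one predicate bit per participating lane into a 32-bit mask. A warp-level C++ model (illustrative sketch of the PTX semantics):

    #include <cstdint>

    uint32_t vote_ballot_sync(uint32_t membermask, const bool pred[32]) {
      uint32_t out = 0;
      for (int lane = 0; lane < 32; ++lane)
        if (((membermask >> lane) & 1) && pred[lane])
          out |= 1u << lane; // bit = lane's predicate
      return out;
    }
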
// elect.sync
+let Predicates = [hasPTX<80>, hasSM<90>] in {
def INT_ELECT_SYNC_I : BasicNVPTXInst<(outs B32:$dest, B1:$pred), (ins i32imm:$mask),
"elect.sync",
- [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync imm:$mask))]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync imm:$mask))]>;
def INT_ELECT_SYNC_R : BasicNVPTXInst<(outs B32:$dest, B1:$pred), (ins B32:$mask),
"elect.sync",
- [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync i32:$mask))]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync i32:$mask))]>;
+}
+
+let Predicates = [hasPTX<60>, hasSM<70>] in {
+ multiclass MATCH_ANY_SYNC<Intrinsic op, RegTyInfo t> {
+ def ii : BasicNVPTXInst<(outs B32:$dest), (ins t.Imm:$value, i32imm:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op imm:$mask, imm:$value))]>;
+ def ir : BasicNVPTXInst<(outs B32:$dest), (ins t.Imm:$value, B32:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op i32:$mask, imm:$value))]>;
+ def ri : BasicNVPTXInst<(outs B32:$dest), (ins t.RC:$value, i32imm:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op imm:$mask, t.Ty:$value))]>;
+ def rr : BasicNVPTXInst<(outs B32:$dest), (ins t.RC:$value, B32:$mask),
+ "match.any.sync." # t.PtxType,
+ [(set i32:$dest, (op i32:$mask, t.Ty:$value))]>;
+ }
-multiclass MATCH_ANY_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic IntOp,
- Operand ImmOp> {
- def ii : BasicNVPTXInst<(outs B32:$dest), (ins ImmOp:$value, i32imm:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp imm:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ir : BasicNVPTXInst<(outs B32:$dest), (ins ImmOp:$value, B32:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp i32:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ri : BasicNVPTXInst<(outs B32:$dest), (ins regclass:$value, i32imm:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp imm:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def rr : BasicNVPTXInst<(outs B32:$dest), (ins regclass:$value, B32:$mask),
- "match.any.sync." # ptxtype,
- [(set i32:$dest, (IntOp i32:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
+ defm MATCH_ANY_SYNC_32 : MATCH_ANY_SYNC<int_nvvm_match_any_sync_i32, I32RT>;
+ defm MATCH_ANY_SYNC_64 : MATCH_ANY_SYNC<int_nvvm_match_any_sync_i64, I64RT>;
+
+ multiclass MATCH_ALLP_SYNC<RegTyInfo t, Intrinsic op> {
+ def ii : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.Imm:$value, i32imm:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op imm:$mask, imm:$value))]>;
+ def ir : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.Imm:$value, B32:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op i32:$mask, imm:$value))]>;
+ def ri : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.RC:$value, i32imm:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op imm:$mask, t.Ty:$value))]>;
+ def rr : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
+ (ins t.RC:$value, B32:$mask),
+ "match.all.sync." # t.PtxType,
+ [(set i32:$dest, i1:$pred, (op i32:$mask, t.Ty:$value))]>;
+ }
+ defm MATCH_ALLP_SYNC_32 : MATCH_ALLP_SYNC<I32RT, int_nvvm_match_all_sync_i32p>;
+ defm MATCH_ALLP_SYNC_64 : MATCH_ALLP_SYNC<I64RT, int_nvvm_match_all_sync_i64p>;
}
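
Similarly, match.any.sync.bN returns, for each lane, the set of participating lanes holding the same value. A warp-level C++ model of the 32-bit variant (sketch of the PTX semantics only):

    #include <cstdint>

    uint32_t match_any_sync(uint32_t membermask, const uint32_t val[32],
                            int lane) {
      uint32_t out = 0;
      for (int i = 0; i < 32; ++i)
        if (((membermask >> i) & 1) && val[i] == val[lane])
          out |= 1u << i; // lanes agreeing with this lane's value
      return out;
    }
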
// activemask.b32
@@ -308,39 +327,6 @@ def ACTIVEMASK : BasicNVPTXInst<(outs B32:$dest), (ins),
[(set i32:$dest, (int_nvvm_activemask))]>,
Requires<[hasPTX<62>, hasSM<30>]>;
-defm MATCH_ANY_SYNC_32 : MATCH_ANY_SYNC<B32, "b32", int_nvvm_match_any_sync_i32,
- i32imm>;
-defm MATCH_ANY_SYNC_64 : MATCH_ANY_SYNC<B64, "b64", int_nvvm_match_any_sync_i64,
- i64imm>;
-
-multiclass MATCH_ALLP_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic IntOp,
- Operand ImmOp> {
- def ii : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins ImmOp:$value, i32imm:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp imm:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ir : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins ImmOp:$value, B32:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp i32:$mask, imm:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def ri : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins regclass:$value, i32imm:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp imm:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
- def rr : BasicNVPTXInst<(outs B32:$dest, B1:$pred),
- (ins regclass:$value, B32:$mask),
- "match.all.sync." # ptxtype,
- [(set i32:$dest, i1:$pred, (IntOp i32:$mask, regclass:$value))]>,
- Requires<[hasPTX<60>, hasSM<70>]>;
-}
-defm MATCH_ALLP_SYNC_32 : MATCH_ALLP_SYNC<B32, "b32", int_nvvm_match_all_sync_i32p,
- i32imm>;
-defm MATCH_ALLP_SYNC_64 : MATCH_ALLP_SYNC<B64, "b64", int_nvvm_match_all_sync_i64p,
- i64imm>;
-
multiclass REDUX_SYNC<string BinOp, string PTXType, Intrinsic Intrin> {
def : BasicNVPTXInst<(outs B32:$dst), (ins B32:$src, B32:$mask),
"redux.sync." # BinOp # "." # PTXType,
@@ -381,24 +367,20 @@ defm REDUX_SYNC_FMAX_ABS_NAN: REDUX_SYNC_F<"max", ".abs", ".NaN">;
//-----------------------------------
// Explicit Memory Fence Functions
//-----------------------------------
-class MEMBAR<string StrOp, Intrinsic IntOP> :
- BasicNVPTXInst<(outs), (ins),
- StrOp, [(IntOP)]>;
+class NullaryInst<string StrOp, Intrinsic IntOP> :
+ BasicNVPTXInst<(outs), (ins), StrOp, [(IntOP)]>;
-def INT_MEMBAR_CTA : MEMBAR<"membar.cta", int_nvvm_membar_cta>;
-def INT_MEMBAR_GL : MEMBAR<"membar.gl", int_nvvm_membar_gl>;
-def INT_MEMBAR_SYS : MEMBAR<"membar.sys", int_nvvm_membar_sys>;
+def INT_MEMBAR_CTA : NullaryInst<"membar.cta", int_nvvm_membar_cta>;
+def INT_MEMBAR_GL : NullaryInst<"membar.gl", int_nvvm_membar_gl>;
+def INT_MEMBAR_SYS : NullaryInst<"membar.sys", int_nvvm_membar_sys>;
def INT_FENCE_SC_CLUSTER:
- MEMBAR<"fence.sc.cluster", int_nvvm_fence_sc_cluster>,
+ NullaryInst<"fence.sc.cluster", int_nvvm_fence_sc_cluster>,
Requires<[hasPTX<78>, hasSM<90>]>;
// Proxy fence (uni-directional)
-// fence.proxy.tensormap.release variants
-
class FENCE_PROXY_TENSORMAP_GENERIC_RELEASE<string Scope, Intrinsic Intr> :
- BasicNVPTXInst<(outs), (ins),
- "fence.proxy.tensormap::generic.release." # Scope, [(Intr)]>,
+ NullaryInst<"fence.proxy.tensormap::generic.release." # Scope, Intr>,
Requires<[hasPTX<83>, hasSM<90>]>;
def INT_FENCE_PROXY_TENSORMAP_GENERIC_RELEASE_CTA:
@@ -488,35 +470,31 @@ defm CP_ASYNC_CG_SHARED_GLOBAL_16 :
CP_ASYNC_SHARED_GLOBAL_I<"cg", "16", int_nvvm_cp_async_cg_shared_global_16,
int_nvvm_cp_async_cg_shared_global_16_s>;
-def CP_ASYNC_COMMIT_GROUP :
- BasicNVPTXInst<(outs), (ins), "cp.async.commit_group", [(int_nvvm_cp_async_commit_group)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+let Predicates = [hasPTX<70>, hasSM<80>] in {
+ def CP_ASYNC_COMMIT_GROUP :
+ NullaryInst<"cp.async.commit_group", int_nvvm_cp_async_commit_group>;
-def CP_ASYNC_WAIT_GROUP :
- BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.wait_group",
- [(int_nvvm_cp_async_wait_group timm:$n)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+ def CP_ASYNC_WAIT_GROUP :
+ BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.wait_group",
+ [(int_nvvm_cp_async_wait_group timm:$n)]>;
-def CP_ASYNC_WAIT_ALL :
- BasicNVPTXInst<(outs), (ins), "cp.async.wait_all",
- [(int_nvvm_cp_async_wait_all)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+ def CP_ASYNC_WAIT_ALL :
+ NullaryInst<"cp.async.wait_all", int_nvvm_cp_async_wait_all>;
+}
-// cp.async.bulk variants of the commit/wait group
-def CP_ASYNC_BULK_COMMIT_GROUP :
- BasicNVPTXInst<(outs), (ins), "cp.async.bulk.commit_group",
- [(int_nvvm_cp_async_bulk_commit_group)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+let Predicates = [hasPTX<80>, hasSM<90>] in {
+ // cp.async.bulk variants of the commit/wait group
+ def CP_ASYNC_BULK_COMMIT_GROUP :
+ NullaryInst<"cp.async.bulk.commit_group", int_nvvm_cp_async_bulk_commit_group>;
-def CP_ASYNC_BULK_WAIT_GROUP :
- BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group",
- [(int_nvvm_cp_async_bulk_wait_group timm:$n)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def CP_ASYNC_BULK_WAIT_GROUP :
+ BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group",
+ [(int_nvvm_cp_async_bulk_wait_group timm:$n)]>;
-def CP_ASYNC_BULK_WAIT_GROUP_READ :
- BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group.read",
- [(int_nvvm_cp_async_bulk_wait_group_read timm:$n)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def CP_ASYNC_BULK_WAIT_GROUP_READ :
+ BasicNVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group.read",
+ [(int_nvvm_cp_async_bulk_wait_group_read timm:$n)]>;
+}
//------------------------------
// TMA Async Bulk Copy Functions
@@ -974,33 +952,30 @@ defm TMA_TENSOR_PF_TILE_GATHER4_2D : TMA_TENSOR_PREFETCH_INTR<5, "tile_gather4",
//Prefetch and Prefetchu
-class PREFETCH_INTRS<string InstName> :
- BasicNVPTXInst<(outs), (ins ADDR:$addr),
- InstName,
- [(!cast<Intrinsic>(!strconcat("int_nvvm_",
- !subst(".", "_", InstName))) addr:$addr)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
-
+let Predicates = [hasPTX<80>, hasSM<90>] in {
+ class PREFETCH_INTRS<string InstName> :
+ BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ InstName,
+ [(!cast<Intrinsic>(!strconcat("int_nvvm_",
+ !subst(".", "_", InstName))) addr:$addr)]>;
-def PREFETCH_L1 : PREFETCH_INTRS<"prefetch.L1">;
-def PREFETCH_L2 : PREFETCH_INTRS<"prefetch.L2">;
-def PREFETCH_GLOBAL_L1 : PREFETCH_INTRS<"prefetch.global.L1">;
-def PREFETCH_LOCAL_L1 : PREFETCH_INTRS<"prefetch.local.L1">;
-def PREFETCH_GLOBAL_L2 : PREFETCH_INTRS<"prefetch.global.L2">;
-def PREFETCH_LOCAL_L2 : PREFETCH_INTRS<"prefetch.local.L2">;
+ def PREFETCH_L1 : PREFETCH_INTRS<"prefetch.L1">;
+ def PREFETCH_L2 : PREFETCH_INTRS<"prefetch.L2">;
+ def PREFETCH_GLOBAL_L1 : PREFETCH_INTRS<"prefetch.global.L1">;
+ def PREFETCH_LOCAL_L1 : PREFETCH_INTRS<"prefetch.local.L1">;
+ def PREFETCH_GLOBAL_L2 : PREFETCH_INTRS<"prefetch.global.L2">;
+ def PREFETCH_LOCAL_L2 : PREFETCH_INTRS<"prefetch.local.L2">;
-def PREFETCH_GLOBAL_L2_EVICT_NORMAL : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "prefetch.global.L2::evict_normal",
- [(int_nvvm_prefetch_global_L2_evict_normal addr:$addr)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def PREFETCH_GLOBAL_L2_EVICT_NORMAL : BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ "prefetch.global.L2::evict_normal",
+ [(int_nvvm_prefetch_global_L2_evict_normal addr:$addr)]>;
-def PREFETCH_GLOBAL_L2_EVICT_LAST : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "prefetch.global.L2::evict_last",
- [(int_nvvm_prefetch_global_L2_evict_last addr:$addr)]>,
- Requires<[hasPTX<80>, hasSM<90>]>;
+ def PREFETCH_GLOBAL_L2_EVICT_LAST : BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ "prefetch.global.L2::evict_last",
+ [(int_nvvm_prefetch_global_L2_evict_last addr:$addr)]>;
-
-def PREFETCHU_L1 : PREFETCH_INTRS<"prefetchu.L1">;
+ def PREFETCHU_L1 : PREFETCH_INTRS<"prefetchu.L1">;
+}
//Applypriority intrinsics
class APPLYPRIORITY_L2_INTRS<string addrspace> :
@@ -1031,99 +1006,82 @@ def DISCARD_GLOBAL_L2 : DISCARD_L2_INTRS<"global">;
// MBarrier Functions
//-----------------------------------
-multiclass MBARRIER_INIT<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs), (ins ADDR:$addr, B32:$count),
- "mbarrier.init" # AddrSpace # ".b64",
- [(Intrin addr:$addr, i32:$count)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_INIT : MBARRIER_INIT<"", int_nvvm_mbarrier_init>;
-defm MBARRIER_INIT_SHARED : MBARRIER_INIT<".shared",
- int_nvvm_mbarrier_init_shared>;
-
-multiclass MBARRIER_INVAL<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs), (ins ADDR:$addr),
- "mbarrier.inval" # AddrSpace # ".b64",
- [(Intrin addr:$addr)]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_INVAL : MBARRIER_INVAL<"", int_nvvm_mbarrier_inval>;
-defm MBARRIER_INVAL_SHARED : MBARRIER_INVAL<".shared",
- int_nvvm_mbarrier_inval_shared>;
-
-multiclass MBARRIER_ARRIVE<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
- "mbarrier.arrive" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE : MBARRIER_ARRIVE<"", int_nvvm_mbarrier_arrive>;
-defm MBARRIER_ARRIVE_SHARED :
- MBARRIER_ARRIVE<".shared", int_nvvm_mbarrier_arrive_shared>;
-
-multiclass MBARRIER_ARRIVE_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state),
- (ins ADDR:$addr, B32:$count),
- "mbarrier.arrive.noComplete" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr, i32:$count))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE_NOCOMPLETE :
- MBARRIER_ARRIVE_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_noComplete>;
-defm MBARRIER_ARRIVE_NOCOMPLETE_SHARED :
- MBARRIER_ARRIVE_NOCOMPLETE<".shared", int_nvvm_mbarrier_arrive_noComplete_shared>;
-
-multiclass MBARRIER_ARRIVE_DROP<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
- "mbarrier.arrive_drop" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE_DROP :
- MBARRIER_ARRIVE_DROP<"", int_nvvm_mbarrier_arrive_drop>;
-defm MBARRIER_ARRIVE_DROP_SHARED :
- MBARRIER_ARRIVE_DROP<".shared", int_nvvm_mbarrier_arrive_drop_shared>;
-
-multiclass MBARRIER_ARRIVE_DROP_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B64:$state),
- (ins ADDR:$addr, B32:$count),
- "mbarrier.arrive_drop.noComplete" # AddrSpace # ".b64",
- [(set i64:$state, (Intrin addr:$addr, i32:$count))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-}
-
-defm MBARRIER_ARRIVE_DROP_NOCOMPLETE :
- MBARRIER_ARRIVE_DROP_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_drop_noComplete>;
-defm MBARRIER_ARRIVE_DROP_NOCOMPLETE_SHARED :
- MBARRIER_ARRIVE_DROP_NOCOMPLETE<".shared",
- int_nvvm_mbarrier_arrive_drop_noComplete_shared>;
-
-multiclass MBARRIER_TEST_WAIT<string AddrSpace, Intrinsic Intrin> {
- def "" : BasicNVPTXInst<(outs B1:$res), (ins ADDR:$addr, B64:$state),
- "mbarrier.test_wait" # AddrSpace # ".b64",
- [(set i1:$res, (Intrin addr:$addr, i64:$state))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
+let Predicates = [hasPTX<70>, hasSM<80>] in {
+ class MBARRIER_INIT<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs), (ins ADDR:$addr, B32:$count),
+ "mbarrier.init" # AddrSpace # ".b64",
+ [(Intrin addr:$addr, i32:$count)]>;
+
+ def MBARRIER_INIT : MBARRIER_INIT<"", int_nvvm_mbarrier_init>;
+ def MBARRIER_INIT_SHARED : MBARRIER_INIT<".shared",
+ int_nvvm_mbarrier_init_shared>;
+
+ class MBARRIER_INVAL<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs), (ins ADDR:$addr),
+ "mbarrier.inval" # AddrSpace # ".b64",
+ [(Intrin addr:$addr)]>;
+
+ def MBARRIER_INVAL : MBARRIER_INVAL<"", int_nvvm_mbarrier_inval>;
+ def MBARRIER_INVAL_SHARED : MBARRIER_INVAL<".shared",
+ int_nvvm_mbarrier_inval_shared>;
+
+ class MBARRIER_ARRIVE<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
+ "mbarrier.arrive" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr))]>;
+
+ def MBARRIER_ARRIVE : MBARRIER_ARRIVE<"", int_nvvm_mbarrier_arrive>;
+ def MBARRIER_ARRIVE_SHARED :
+ MBARRIER_ARRIVE<".shared", int_nvvm_mbarrier_arrive_shared>;
+
+ class MBARRIER_ARRIVE_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state),
+ (ins ADDR:$addr, B32:$count),
+ "mbarrier.arrive.noComplete" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr, i32:$count))]>;
+
+ def MBARRIER_ARRIVE_NOCOMPLETE :
+ MBARRIER_ARRIVE_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_noComplete>;
+ def MBARRIER_ARRIVE_NOCOMPLETE_SHARED :
+ MBARRIER_ARRIVE_NOCOMPLETE<".shared", int_nvvm_mbarrier_arrive_noComplete_shared>;
+
+ class MBARRIER_ARRIVE_DROP<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state), (ins ADDR:$addr),
+ "mbarrier.arrive_drop" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr))]>;
+
+ def MBARRIER_ARRIVE_DROP :
+ MBARRIER_ARRIVE_DROP<"", int_nvvm_mbarrier_arrive_drop>;
+ def MBARRIER_ARRIVE_DROP_SHARED :
+ MBARRIER_ARRIVE_DROP<".shared", int_nvvm_mbarrier_arrive_drop_shared>;
+
+ class MBARRIER_ARRIVE_DROP_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B64:$state),
+ (ins ADDR:$addr, B32:$count),
+ "mbarrier.arrive_drop.noComplete" # AddrSpace # ".b64",
+ [(set i64:$state, (Intrin addr:$addr, i32:$count))]>;
+
+ def MBARRIER_ARRIVE_DROP_NOCOMPLETE :
+ MBARRIER_ARRIVE_DROP_NOCOMPLETE<"", int_nvvm_mbarrier_arrive_drop_noComplete>;
+ def MBARRIER_ARRIVE_DROP_NOCOMPLETE_SHARED :
+ MBARRIER_ARRIVE_DROP_NOCOMPLETE<".shared",
+ int_nvvm_mbarrier_arrive_drop_noComplete_shared>;
+
+ class MBARRIER_TEST_WAIT<string AddrSpace, Intrinsic Intrin> :
+ BasicNVPTXInst<(outs B1:$res), (ins ADDR:$addr, B64:$state),
+ "mbarrier.test_wait" # AddrSpace # ".b64",
+ [(set i1:$res, (Intrin addr:$addr, i64:$state))]>;
+
+ def MBARRIER_TEST_WAIT :
+ MBARRIER_TEST_WAIT<"", int_nvvm_mbarrier_test_wait>;
+ def MBARRIER_TEST_WAIT_SHARED :
+ MBARRIER_TEST_WAIT<".shared", int_nvvm_mbarrier_test_wait_shared>;
+
+ def MBARRIER_PENDING_COUNT :
+ BasicNVPTXInst<(outs B32:$res), (ins B64:$state),
+ "mbarrier.pending_count.b64",
+ [(set i32:$res, (int_nvvm_mbarrier_pending_count i64:$state))]>;
}
-
-defm MBARRIER_TEST_WAIT :
- MBARRIER_TEST_WAIT<"", int_nvvm_mbarrier_test_wait>;
-defm MBARRIER_TEST_WAIT_SHARED :
- MBARRIER_TEST_WAIT<".shared", int_nvvm_mbarrier_test_wait_shared>;
-
-class MBARRIER_PENDING_COUNT<Intrinsic Intrin> :
- BasicNVPTXInst<(outs B32:$res), (ins B64:$state),
- "mbarrier.pending_count.b64",
- [(set i32:$res, (Intrin i64:$state))]>,
- Requires<[hasPTX<70>, hasSM<80>]>;
-
-def MBARRIER_PENDING_COUNT :
- MBARRIER_PENDING_COUNT<int_nvvm_mbarrier_pending_count>;
-
//-----------------------------------
// Math Functions
//-----------------------------------
@@ -1449,15 +1407,11 @@ defm ABS_F64 : F_ABS<"f64", F64RT, support_ftz = false>;
def fcopysign_nvptx : SDNode<"NVPTXISD::FCOPYSIGN", SDTFPBinOp>;
-def COPYSIGN_F :
- BasicNVPTXInst<(outs B32:$dst), (ins B32:$src0, B32:$src1),
- "copysign.f32",
- [(set f32:$dst, (fcopysign_nvptx f32:$src1, f32:$src0))]>;
-
-def COPYSIGN_D :
- BasicNVPTXInst<(outs B64:$dst), (ins B64:$src0, B64:$src1),
- "copysign.f64",
- [(set f64:$dst, (fcopysign_nvptx f64:$src1, f64:$src0))]>;
+foreach t = [F32RT, F64RT] in
+ def COPYSIGN_ # t :
+ BasicNVPTXInst<(outs t.RC:$dst), (ins t.RC:$src0, t.RC:$src1),
+ "copysign." # t.PtxType,
+ [(set t.Ty:$dst, (fcopysign_nvptx t.Ty:$src1, t.Ty:$src0))]>;
//
// Neg bf16, bf16x2
@@ -2255,38 +2209,35 @@ defm INT_PTX_SATOM_XOR : ATOM2_bitwise_impl<"xor">;
// Scalar
-class LDU_G<string TyStr, NVPTXRegClass regclass>
- : NVPTXInst<(outs regclass:$result), (ins ADDR:$src),
- "ldu.global." # TyStr # " \t$result, [$src];", []>;
+class LDU_G<NVPTXRegClass regclass>
+ : NVPTXInst<(outs regclass:$result), (ins i32imm:$fromWidth, ADDR:$src),
+ "ldu.global.b$fromWidth \t$result, [$src];", []>;
-def LDU_GLOBAL_i8 : LDU_G<"b8", B16>;
-def LDU_GLOBAL_i16 : LDU_G<"b16", B16>;
-def LDU_GLOBAL_i32 : LDU_G<"b32", B32>;
-def LDU_GLOBAL_i64 : LDU_G<"b64", B64>;
+def LDU_GLOBAL_i16 : LDU_G<B16>;
+def LDU_GLOBAL_i32 : LDU_G<B32>;
+def LDU_GLOBAL_i64 : LDU_G<B64>;
// vector
// Elementized vector ldu
-class VLDU_G_ELE_V2<string TyStr, NVPTXRegClass regclass>
+class VLDU_G_ELE_V2<NVPTXRegClass regclass>
: NVPTXInst<(outs regclass:$dst1, regclass:$dst2),
- (ins ADDR:$src),
- "ldu.global.v2." # TyStr # " \t{{$dst1, $dst2}}, [$src];", []>;
+ (ins i32imm:$fromWidth, ADDR:$src),
+ "ldu.global.v2.b$fromWidth \t{{$dst1, $dst2}}, [$src];", []>;
-class VLDU_G_ELE_V4<string TyStr, NVPTXRegClass regclass>
- : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3,
- regclass:$dst4), (ins ADDR:$src),
- "ldu.global.v4." # TyStr # " \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", []>;
+class VLDU_G_ELE_V4<NVPTXRegClass regclass>
+ : NVPTXInst<(outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins i32imm:$fromWidth, ADDR:$src),
+ "ldu.global.v4.b$fromWidth \t{{$dst1, $dst2, $dst3, $dst4}}, [$src];", []>;
-def LDU_GLOBAL_v2i8 : VLDU_G_ELE_V2<"b8", B16>;
-def LDU_GLOBAL_v2i16 : VLDU_G_ELE_V2<"b16", B16>;
-def LDU_GLOBAL_v2i32 : VLDU_G_ELE_V2<"b32", B32>;
-def LDU_GLOBAL_v2i64 : VLDU_G_ELE_V2<"b64", B64>;
+def LDU_GLOBAL_v2i16 : VLDU_G_ELE_V2<B16>;
+def LDU_GLOBAL_v2i32 : VLDU_G_ELE_V2<B32>;
+def LDU_GLOBAL_v2i64 : VLDU_G_ELE_V2<B64>;
-def LDU_GLOBAL_v4i8 : VLDU_G_ELE_V4<"b8", B16>;
-def LDU_GLOBAL_v4i16 : VLDU_G_ELE_V4<"b16", B16>;
-def LDU_GLOBAL_v4i32 : VLDU_G_ELE_V4<"b32", B32>;
+def LDU_GLOBAL_v4i16 : VLDU_G_ELE_V4<B16>;
+def LDU_GLOBAL_v4i32 : VLDU_G_ELE_V4<B32>;
//-----------------------------------
@@ -2327,12 +2278,10 @@ class VLDG_G_ELE_V8<NVPTXRegClass regclass> :
"ld.global.nc.v8.${Sign:sign}$fromWidth \t{{$dst1, $dst2, $dst3, $dst4, $dst5, $dst6, $dst7, $dst8}}, [$src];", []>;
// FIXME: 8-bit LDG should be fixed once LDG/LDU nodes are made into proper loads.
-def LD_GLOBAL_NC_v2i8 : VLDG_G_ELE_V2<B16>;
def LD_GLOBAL_NC_v2i16 : VLDG_G_ELE_V2<B16>;
def LD_GLOBAL_NC_v2i32 : VLDG_G_ELE_V2<B32>;
def LD_GLOBAL_NC_v2i64 : VLDG_G_ELE_V2<B64>;
-def LD_GLOBAL_NC_v4i8 : VLDG_G_ELE_V4<B16>;
def LD_GLOBAL_NC_v4i16 : VLDG_G_ELE_V4<B16>;
def LD_GLOBAL_NC_v4i32 : VLDG_G_ELE_V4<B32>;
@@ -2342,19 +2291,19 @@ def LD_GLOBAL_NC_v8i32 : VLDG_G_ELE_V8<B32>;
multiclass NG_TO_G<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
if Supports32 then
def "" : BasicNVPTXInst<(outs B32:$result), (ins B32:$src),
- "cvta." # Str # ".u32", []>, Requires<Preds>;
+ "cvta." # Str # ".u32">, Requires<Preds>;
def _64 : BasicNVPTXInst<(outs B64:$result), (ins B64:$src),
- "cvta." # Str # ".u64", []>, Requires<Preds>;
+ "cvta." # Str # ".u64">, Requires<Preds>;
}
multiclass G_TO_NG<string Str, bit Supports32 = 1, list<Predicate> Preds = []> {
if Supports32 then
def "" : BasicNVPTXInst<(outs B32:$result), (ins B32:$src),
- "cvta.to." # Str # ".u32", []>, Requires<Preds>;
+ "cvta.to." # Str # ".u32">, Requires<Preds>;
def _64 : BasicNVPTXInst<(outs B64:$result), (ins B64:$src),
- "cvta.to." # Str # ".u64", []>, Requires<Preds>;
+ "cvta.to." # Str # ".u64">, Requires<Preds>;
}
foreach space = ["local", "shared", "global", "const", "param"] in {
@@ -4614,9 +4563,9 @@ def INT_PTX_SREG_LANEMASK_GT :
PTX_READ_SREG_R32<"lanemask_gt", int_nvvm_read_ptx_sreg_lanemask_gt>;
let hasSideEffects = 1 in {
-def SREG_CLOCK : PTX_READ_SREG_R32<"clock", int_nvvm_read_ptx_sreg_clock>;
-def SREG_CLOCK64 : PTX_READ_SREG_R64<"clock64", int_nvvm_read_ptx_sreg_clock64>;
-def SREG_GLOBALTIMER : PTX_READ_SREG_R64<"globaltimer", int_nvvm_read_ptx_sreg_globaltimer>;
+ def SREG_CLOCK : PTX_READ_SREG_R32<"clock", int_nvvm_read_ptx_sreg_clock>;
+ def SREG_CLOCK64 : PTX_READ_SREG_R64<"clock64", int_nvvm_read_ptx_sreg_clock64>;
+ def SREG_GLOBALTIMER : PTX_READ_SREG_R64<"globaltimer", int_nvvm_read_ptx_sreg_globaltimer>;
}
def: Pat <(i64 (readcyclecounter)), (SREG_CLOCK64)>;
@@ -5096,37 +5045,36 @@ foreach mma = !listconcat(MMAs, WMMAs, MMA_LDSTs, LDMATRIXs, STMATRIXs) in
def : MMA_PAT<mma>;
multiclass MAPA<string suffix, Intrinsic Intr> {
- def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, B32:$b),
- "mapa" # suffix # ".u32",
- [(set i32:$d, (Intr i32:$a, i32:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _32i: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, i32imm:$b),
- "mapa" # suffix # ".u32",
- [(set i32:$d, (Intr i32:$a, imm:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _64: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, B32:$b),
- "mapa" # suffix # ".u64",
- [(set i64:$d, (Intr i64:$a, i32:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _64i: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, i32imm:$b),
- "mapa" # suffix # ".u64",
- [(set i64:$d, (Intr i64:$a, imm:$b))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
+ let Predicates = [hasSM<90>, hasPTX<78>] in {
+ def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, B32:$b),
+ "mapa" # suffix # ".u32",
+ [(set i32:$d, (Intr i32:$a, i32:$b))]>;
+ def _32i: BasicNVPTXInst<(outs B32:$d), (ins B32:$a, i32imm:$b),
+ "mapa" # suffix # ".u32",
+ [(set i32:$d, (Intr i32:$a, imm:$b))]>;
+ def _64: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, B32:$b),
+ "mapa" # suffix # ".u64",
+ [(set i64:$d, (Intr i64:$a, i32:$b))]>;
+ def _64i: BasicNVPTXInst<(outs B64:$d), (ins B64:$a, i32imm:$b),
+ "mapa" # suffix # ".u64",
+ [(set i64:$d, (Intr i64:$a, imm:$b))]>;
+ }
}
+
defm mapa : MAPA<"", int_nvvm_mapa>;
defm mapa_shared_cluster : MAPA<".shared::cluster", int_nvvm_mapa_shared_cluster>;
multiclass GETCTARANK<string suffix, Intrinsic Intr> {
- def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a),
- "getctarank" # suffix # ".u32",
- [(set i32:$d, (Intr i32:$a))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
- def _64: BasicNVPTXInst<(outs B32:$d), (ins B64:$a),
- "getctarank" # suffix # ".u64",
- [(set i32:$d, (Intr i64:$a))]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
+ let Predicates = [hasSM<90>, hasPTX<78>] in {
+ def _32: BasicNVPTXInst<(outs B32:$d), (ins B32:$a),
+ "getctarank" # suffix # ".u32",
+ [(set i32:$d, (Intr i32:$a))]>;
+ def _64: BasicNVPTXInst<(outs B32:$d), (ins B64:$a),
+ "getctarank" # suffix # ".u64",
+ [(set i32:$d, (Intr i64:$a))]>;
+ }
}
defm getctarank : GETCTARANK<"", int_nvvm_getctarank>;
@@ -5165,29 +5113,25 @@ def INT_NVVM_WGMMA_WAIT_GROUP_SYNC_ALIGNED : BasicNVPTXInst<(outs), (ins i64imm:
[(int_nvvm_wgmma_wait_group_sync_aligned timm:$n)]>, Requires<[hasSM90a, hasPTX<80>]>;
} // isConvergent = true
-def GRIDDEPCONTROL_LAUNCH_DEPENDENTS :
- BasicNVPTXInst<(outs), (ins),
- "griddepcontrol.launch_dependents",
- [(int_nvvm_griddepcontrol_launch_dependents)]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
-
-def GRIDDEPCONTROL_WAIT :
- BasicNVPTXInst<(outs), (ins),
- "griddepcontrol.wait",
- [(int_nvvm_griddepcontrol_wait)]>,
- Requires<[hasSM<90>, hasPTX<78>]>;
+let Predicates = [hasSM<90>, hasPTX<78>] in {
+ def GRIDDEPCONTROL_LAUNCH_DEPENDENTS :
+ BasicNVPTXInst<(outs), (ins), "griddepcontrol.launch_dependents",
+ [(int_nvvm_griddepcontrol_launch_dependents)]>;
+ def GRIDDEPCONTROL_WAIT :
+ BasicNVPTXInst<(outs), (ins), "griddepcontrol.wait",
+ [(int_nvvm_griddepcontrol_wait)]>;
+}
def INT_EXIT : BasicNVPTXInst<(outs), (ins), "exit", [(int_nvvm_exit)]>;
// Tcgen05 intrinsics
-let isConvergent = true in {
+let isConvergent = true, Predicates = [hasTcgen05Instructions] in {
multiclass TCGEN05_ALLOC_INTR<string AS, string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs),
(ins ADDR:$dst, B32:$ncols),
"tcgen05.alloc.cta_group::" # num # ".sync.aligned" # AS # ".b32",
- [(Intr addr:$dst, B32:$ncols)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr addr:$dst, B32:$ncols)]>;
}
defm TCGEN05_ALLOC_CG1 : TCGEN05_ALLOC_INTR<"", "1", int_nvvm_tcgen05_alloc_cg1>;
@@ -5200,8 +5144,7 @@ multiclass TCGEN05_DEALLOC_INTR<string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs),
(ins B32:$tmem_addr, B32:$ncols),
"tcgen05.dealloc.cta_group::" # num # ".sync.aligned.b32",
- [(Intr B32:$tmem_addr, B32:$ncols)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr B32:$tmem_addr, B32:$ncols)]>;
}
defm TCGEN05_DEALLOC_CG1: TCGEN05_DEALLOC_INTR<"1", int_nvvm_tcgen05_dealloc_cg1>;
defm TCGEN05_DEALLOC_CG2: TCGEN05_DEALLOC_INTR<"2", int_nvvm_tcgen05_dealloc_cg2>;
@@ -5209,19 +5152,13 @@ defm TCGEN05_DEALLOC_CG2: TCGEN05_DEALLOC_INTR<"2", int_nvvm_tcgen05_dealloc_cg2
multiclass TCGEN05_RELINQ_PERMIT_INTR<string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs), (ins),
"tcgen05.relinquish_alloc_permit.cta_group::" # num # ".sync.aligned",
- [(Intr)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr)]>;
}
defm TCGEN05_RELINQ_CG1: TCGEN05_RELINQ_PERMIT_INTR<"1", int_nvvm_tcgen05_relinq_alloc_permit_cg1>;
defm TCGEN05_RELINQ_CG2: TCGEN05_RELINQ_PERMIT_INTR<"2", int_nvvm_tcgen05_relinq_alloc_permit_cg2>;
-def tcgen05_wait_ld: BasicNVPTXInst<(outs), (ins), "tcgen05.wait::ld.sync.aligned",
- [(int_nvvm_tcgen05_wait_ld)]>,
- Requires<[hasTcgen05Instructions]>;
-
-def tcgen05_wait_st: BasicNVPTXInst<(outs), (ins), "tcgen05.wait::st.sync.aligned",
- [(int_nvvm_tcgen05_wait_st)]>,
- Requires<[hasTcgen05Instructions]>;
+def tcgen05_wait_ld: NullaryInst<"tcgen05.wait::ld.sync.aligned", int_nvvm_tcgen05_wait_ld>;
+def tcgen05_wait_st: NullaryInst<"tcgen05.wait::st.sync.aligned", int_nvvm_tcgen05_wait_st>;
multiclass TCGEN05_COMMIT_INTR<string AS, string num> {
defvar prefix = "tcgen05.commit.cta_group::" # num #".mbarrier::arrive::one.shared::cluster";
@@ -5232,12 +5169,10 @@ multiclass TCGEN05_COMMIT_INTR<string AS, string num> {
def "" : BasicNVPTXInst<(outs), (ins ADDR:$mbar),
prefix # ".b64",
- [(Intr addr:$mbar)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr addr:$mbar)]>;
def _MC : BasicNVPTXInst<(outs), (ins ADDR:$mbar, B16:$mc),
prefix # ".multicast::cluster.b64",
- [(IntrMC addr:$mbar, B16:$mc)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(IntrMC addr:$mbar, B16:$mc)]>;
}
defm TCGEN05_COMMIT_CG1 : TCGEN05_COMMIT_INTR<"", "1">;
@@ -5249,8 +5184,7 @@ multiclass TCGEN05_SHIFT_INTR<string num, Intrinsic Intr> {
def "" : BasicNVPTXInst<(outs),
(ins ADDR:$tmem_addr),
"tcgen05.shift.cta_group::" # num # ".down",
- [(Intr addr:$tmem_addr)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(Intr addr:$tmem_addr)]>;
}
defm TCGEN05_SHIFT_CG1: TCGEN05_SHIFT_INTR<"1", int_nvvm_tcgen05_shift_down_cg1>;
defm TCGEN05_SHIFT_CG2: TCGEN05_SHIFT_INTR<"2", int_nvvm_tcgen05_shift_down_cg2>;
@@ -5270,13 +5204,11 @@ multiclass TCGEN05_CP_INTR<string shape, string src_fmt, string mc = ""> {
def _cg1 : BasicNVPTXInst<(outs),
(ins ADDR:$tmem_addr, B64:$sdesc),
"tcgen05.cp.cta_group::1." # shape_mc_asm # fmt_asm,
- [(IntrCG1 addr:$tmem_addr, B64:$sdesc)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(IntrCG1 addr:$tmem_addr, B64:$sdesc)]>;
def _cg2 : BasicNVPTXInst<(outs),
(ins ADDR:$tmem_addr, B64:$sdesc),
"tcgen05.cp.cta_group::2." # shape_mc_asm # fmt_asm,
- [(IntrCG2 addr:$tmem_addr, B64:$sdesc)]>,
- Requires<[hasTcgen05Instructions]>;
+ [(IntrCG2 addr:$tmem_addr, B64:$sdesc)]>;
}
foreach src_fmt = ["", "b6x16_p32", "b4x16_p64"] in {
@@ -5289,17 +5221,13 @@ foreach src_fmt = ["", "b6x16_p32", "b4x16_p64"] in {
}
} // isConvergent
-let hasSideEffects = 1 in {
+let hasSideEffects = 1, Predicates = [hasTcgen05Instructions] in {
-def tcgen05_fence_before_thread_sync: BasicNVPTXInst<(outs), (ins),
- "tcgen05.fence::before_thread_sync",
- [(int_nvvm_tcgen05_fence_before_thread_sync)]>,
- Requires<[hasTcgen05Instructions]>;
+ def tcgen05_fence_before_thread_sync: NullaryInst<
+ "tcgen05.fence::before_thread_sync", int_nvvm_tcgen05_fence_before_thread_sync>;
-def tcgen05_fence_after_thread_sync: BasicNVPTXInst<(outs), (ins),
- "tcgen05.fence::after_thread_sync",
- [(int_nvvm_tcgen05_fence_after_thread_sync)]>,
- Requires<[hasTcgen05Instructions]>;
+ def tcgen05_fence_after_thread_sync: NullaryInst<
+ "tcgen05.fence::after_thread_sync", int_nvvm_tcgen05_fence_after_thread_sync>;
} // hasSideEffects
@@ -5392,17 +5320,17 @@ foreach shape = ["16x64b", "16x128b", "16x256b", "32x32b", "16x32bx2"] in {
// Bulk store instructions
def st_bulk_imm : TImmLeaf<i64, [{ return Imm == 0; }]>;
-def INT_NVVM_ST_BULK_GENERIC :
- BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
- "st.bulk",
- [(int_nvvm_st_bulk addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>,
- Requires<[hasSM<100>, hasPTX<86>]>;
+let Predicates = [hasSM<100>, hasPTX<86>] in {
+ def INT_NVVM_ST_BULK_GENERIC :
+ BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
+ "st.bulk",
+ [(int_nvvm_st_bulk addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>;
-def INT_NVVM_ST_BULK_SHARED_CTA:
- BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
- "st.bulk.shared::cta",
- [(int_nvvm_st_bulk_shared_cta addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>,
- Requires<[hasSM<100>, hasPTX<86>]>;
+ def INT_NVVM_ST_BULK_SHARED_CTA:
+ BasicNVPTXInst<(outs), (ins ADDR:$dest_addr, B64:$size, i64imm:$value),
+ "st.bulk.shared::cta",
+ [(int_nvvm_st_bulk_shared_cta addr:$dest_addr, i64:$size, st_bulk_imm:$value)]>;
+}
//
// clusterlaunchcontorl Instructions
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
index d40886a..2e81ab1 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -38,14 +38,6 @@ foreach i = 0...4 in {
def R#i : NVPTXReg<"%r"#i>; // 32-bit
def RL#i : NVPTXReg<"%rd"#i>; // 64-bit
def RQ#i : NVPTXReg<"%rq"#i>; // 128-bit
- def H#i : NVPTXReg<"%h"#i>; // 16-bit float
- def HH#i : NVPTXReg<"%hh"#i>; // 2x16-bit float
-
- // Arguments
- def ia#i : NVPTXReg<"%ia"#i>;
- def la#i : NVPTXReg<"%la"#i>;
- def fa#i : NVPTXReg<"%fa"#i>;
- def da#i : NVPTXReg<"%da"#i>;
}
foreach i = 0...31 in {
diff --git a/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp b/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp
index 5eb1f01..b7e2263 100644
--- a/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp
+++ b/llvm/lib/Target/PowerPC/PPCMachineScheduler.cpp
@@ -100,10 +100,14 @@ bool PPCPreRASchedStrategy::tryCandidate(SchedCandidate &Cand,
// This is a best effort to set things up for a post-RA pass. Optimizations
// like generating loads of multiple registers should ideally be done within
// the scheduler pass by combining the loads during DAG postprocessing.
- const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
- const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
- if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
- CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+ unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
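+      // A candidate keeps a cluster together if its SU belongs to the cluster
+      // currently active in its scheduling zone.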
+ bool CandIsClusterSucc =
+ isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+ bool TryCandIsClusterSucc =
+ isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+
+ if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
Cluster))
return TryCand.Reason != NoCand;
@@ -189,10 +193,14 @@ bool PPCPostRASchedStrategy::tryCandidate(SchedCandidate &Cand,
return TryCand.Reason != NoCand;
// Keep clustered nodes together.
- const ClusterInfo *CandCluster = Cand.AtTop ? TopCluster : BotCluster;
- const ClusterInfo *TryCandCluster = TryCand.AtTop ? TopCluster : BotCluster;
- if (tryGreater(TryCandCluster && TryCandCluster->contains(TryCand.SU),
- CandCluster && CandCluster->contains(Cand.SU), TryCand, Cand,
+ unsigned CandZoneCluster = Cand.AtTop ? TopClusterID : BotClusterID;
+ unsigned TryCandZoneCluster = TryCand.AtTop ? TopClusterID : BotClusterID;
+ bool CandIsClusterSucc =
+ isTheSameCluster(CandZoneCluster, Cand.SU->ParentClusterIdx);
+ bool TryCandIsClusterSucc =
+ isTheSameCluster(TryCandZoneCluster, TryCand.SU->ParentClusterIdx);
+
+ if (tryGreater(TryCandIsClusterSucc, CandIsClusterSucc, TryCand, Cand,
Cluster))
return TryCand.Reason != NoCand;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 82e3b5c..9538b20 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -901,7 +901,7 @@ void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
unsigned Offset = Fixup.getOffset();
unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value.
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index f223fdbe..5998653 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2827,6 +2827,8 @@ static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
static bool isWorthFoldingAdd(SDValue Add) {
for (auto *User : Add->users()) {
if (User->getOpcode() != ISD::LOAD && User->getOpcode() != ISD::STORE &&
+ User->getOpcode() != RISCVISD::LD_RV32 &&
+ User->getOpcode() != RISCVISD::SD_RV32 &&
User->getOpcode() != ISD::ATOMIC_LOAD &&
User->getOpcode() != ISD::ATOMIC_STORE)
return false;
@@ -2841,6 +2843,9 @@ static bool isWorthFoldingAdd(SDValue Add) {
if (User->getOpcode() == ISD::ATOMIC_STORE &&
cast<AtomicSDNode>(User)->getVal() == Add)
return false;
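+    // Similarly, do not fold if the add is (part of) the value stored by an
+    // SD_RV32 rather than the address.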
+ if (User->getOpcode() == RISCVISD::SD_RV32 &&
+ (User->getOperand(0) == Add || User->getOperand(1) == Add))
+ return false;
if (isStrongerThanMonotonic(cast<MemSDNode>(User)->getSuccessOrdering()))
return false;
}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b47d89b..c0ada51 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22725,8 +22725,14 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
bool IsVarArg = CLI.IsVarArg;
EVT PtrVT = getPointerTy(DAG.getDataLayout());
MVT XLenVT = Subtarget.getXLenVT();
+ const CallBase *CB = CLI.CB;
MachineFunction &MF = DAG.getMachineFunction();
+ MachineFunction::CallSiteInfo CSInfo;
+
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -22984,6 +22990,9 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
if (CLI.CFIType)
Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge);
+ if (MF.getTarget().Options.EmitCallGraphSection && CB &&
+ CB->isIndirectCall())
+ DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
return Ret;
}
@@ -22991,6 +23000,10 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
Chain = DAG.getNode(CallOpc, DL, NodeTys, Ops);
if (CLI.CFIType)
Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
+
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
+
DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
Glue = Chain.getValue(1);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 6afc942d..03e6f43 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1510,21 +1510,6 @@ class VPseudoTiedBinaryCarryIn<VReg RetClass,
let VLMul = MInfo.value;
}
-class VPseudoTernaryNoMask<VReg RetClass,
- RegisterClass Op1Class,
- DAGOperand Op2Class,
- string Constraint> :
- RISCVVPseudo<(outs RetClass:$rd),
- (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
- AVL:$vl, sew:$sew)> {
- let mayLoad = 0;
- let mayStore = 0;
- let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $rs3"], ",");
- let HasVLOp = 1;
- let HasSEWOp = 1;
-}
-
class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 4eef8de..0d5eb86 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1280,8 +1280,13 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
} else {
Ops = {RISCV::VFCVT_X_F_V};
}
- return std::max(SrcLT.first, LT.first) *
- getRISCVInstructionCost(Ops, LT.second, CostKind);
+
+ // We need to use the source LMUL in the case of a narrowing op, and the
+ // destination LMUL otherwise.
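+    // E.g. a narrowing conversion from nxv8f32 (LMUL=2) to nxv8f16 (LMUL=1)
+    // reads the wider source register group, so it is costed at LMUL=2.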
+ if (SrcEltSz > DstEltSz)
+ return SrcLT.first *
+ getRISCVInstructionCost(Ops, SrcLT.second, CostKind);
+ return LT.first * getRISCVInstructionCost(Ops, LT.second, CostKind);
}
break;
}
@@ -2622,18 +2627,17 @@ void RISCVTTIImpl::getUnrollingPreferences(
if (L->getNumBlocks() > 4)
return;
- // Don't unroll vectorized loops, including the remainder loop
- if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
- return;
-
// Scan the loop: don't unroll loops with calls as this could prevent
- // inlining.
+ // inlining. Don't unroll auto-vectorized loops either, though do allow
+ // unrolling of the scalar remainder.
+ bool IsVectorized = getBooleanLoopAttribute(L, "llvm.loop.isvectorized");
InstructionCost Cost = 0;
for (auto *BB : L->getBlocks()) {
for (auto &I : *BB) {
- // Initial setting - Don't unroll loops containing vectorized
- // instructions.
- if (I.getType()->isVectorTy())
+ // Both auto-vectorized loops and the scalar remainder have the
+ // isvectorized attribute, so differentiate between them by the presence
+ // of vector instructions.
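+      // A scalar remainder loop carries the attribute but contains no vector
+      // instructions, so it remains eligible for unrolling here.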
+ if (IsVectorized && I.getType()->isVectorTy())
return;
if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index c1cc19b..050de3d 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -646,8 +646,7 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
if (!Src || Src->hasUnmodeledSideEffects() ||
Src->getParent() != MI.getParent() ||
!RISCVII::isFirstDefTiedToFirstUse(Src->getDesc()) ||
- !RISCVII::hasVLOp(Src->getDesc().TSFlags) ||
- !RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags))
+ !RISCVII::hasVLOp(Src->getDesc().TSFlags))
return false;
// Src's dest needs to have the same EEW as MI's input.
@@ -681,12 +680,14 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
*Src->getParent()->getParent()));
}
- // If MI was tail agnostic and the VL didn't increase, preserve it.
- int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
- if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
- RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
- Policy |= RISCVVType::TAIL_AGNOSTIC;
- Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
+ if (RISCVII::hasVecPolicyOp(Src->getDesc().TSFlags)) {
+ // If MI was tail agnostic and the VL didn't increase, preserve it.
+ int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+ if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
+ RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
+ Policy |= RISCVVType::TAIL_AGNOSTIC;
+ Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
+ }
MRI->constrainRegClass(Src->getOperand(0).getReg(),
MRI->getRegClass(MI.getOperand(0).getReg()));
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 3c631ce..c4c7e85 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -194,6 +194,42 @@ class SPIRVEmitIntrinsics
void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);
+ // Tries to walk the type accessed by the given GEP instruction.
+ // For each nested type access, one of the two callbacks is called:
+ // - OnLiteralIndexing when the index is a known constant value.
+ //     Parameters:
+ //       PointedType: the pointed type resulting from this indexing.
+ //       Index: index of the element in the parent type.
+ //         If the parent type is an array, this is the index in the array.
+ //         If the parent type is a struct, this is the field index.
+ // - OnDynamicIndexing when the index is a non-constant value.
+ // This callback is only called when indexing into an array.
+ // Parameters:
+ // ElementType: the type of the elements stored in the parent array.
+ // Offset: the Value* containing the byte offset into the array.
+ // Returns true if an error occurred during the walk, false otherwise.
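+ // Example: given struct S { float a; int b[4]; } and a byte offset of 12,
+ // the walk reports OnLiteralIndexing(int[4], 1) for the struct field b,
+ // then OnLiteralIndexing(int, 2) for the array element, leaving offset 0.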
+ bool walkLogicalAccessChain(
+ GetElementPtrInst &GEP,
+ const std::function<void(Type *PointedType, uint64_t Index)>
+ &OnLiteralIndexing,
+ const std::function<void(Type *ElementType, Value *Offset)>
+ &OnDynamicIndexing);
+
+ // Returns the type accessed using the given GEP instruction by relying
+ // on the GEP type.
+ // FIXME: GEP types are not supposed to be used to retrieve the pointed
+ // type. This must be fixed.
+ Type *getGEPType(GetElementPtrInst *GEP);
+
+ // Returns the type accessed using the given GEP instruction by walking
+ // the source type using the GEP indices.
+ // FIXME: without help from the frontend, this method cannot reliably retrieve
+ // the stored type, nor can it robustly determine the depth of the type
+ // we are accessing.
+ Type *getGEPTypeLogical(GetElementPtrInst *GEP);
+
+ Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);
+
public:
static char ID;
SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
@@ -246,6 +282,17 @@ bool expectIgnoredInIRTranslation(const Instruction *I) {
}
}
+// Returns the source pointer from `I`, ignoring intermediate ptrcasts.
+Value *getPointerRoot(Value *I) {
+ if (auto *II = dyn_cast<IntrinsicInst>(I)) {
+ if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
+ Value *V = II->getArgOperand(0);
+ return getPointerRoot(V);
+ }
+ }
+ return I;
+}
+
} // namespace
char SPIRVEmitIntrinsics::ID = 0;
@@ -555,7 +602,111 @@ void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
Ty = RefTy;
}
-Type *getGEPType(GetElementPtrInst *Ref) {
+bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
+ GetElementPtrInst &GEP,
+ const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
+ const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
+ // We only rewrite i8* GEPs. Others should be left as-is.
+ // A valid i8* GEP must always have a single index.
+ assert(GEP.getSourceElementType() ==
+ IntegerType::getInt8Ty(CurrF->getContext()));
+ assert(GEP.getNumIndices() == 1);
+
+ auto &DL = CurrF->getDataLayout();
+ Value *Src = getPointerRoot(GEP.getPointerOperand());
+ Type *CurType = deduceElementType(Src, true);
+
+ Value *Operand = *GEP.idx_begin();
+ ConstantInt *CI = dyn_cast<ConstantInt>(Operand);
+ if (!CI) {
+ ArrayType *AT = dyn_cast<ArrayType>(CurType);
+ // Operand is not constant. Either we have an array and accept it, or we
+ // give up.
+ if (AT)
+ OnDynamicIndexing(AT->getElementType(), Operand);
+ return AT == nullptr;
+ }
+
+ assert(CI);
+ uint64_t Offset = CI->getZExtValue();
+
+ do {
+ if (ArrayType *AT = dyn_cast<ArrayType>(CurType)) {
+ uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
+ assert(Offset < AT->getNumElements() * EltTypeSize);
+ uint64_t Index = Offset / EltTypeSize;
+ Offset = Offset - (Index * EltTypeSize);
+ CurType = AT->getElementType();
+ OnLiteralIndexing(CurType, Index);
+ } else if (StructType *ST = dyn_cast<StructType>(CurType)) {
+ uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
+ assert(Offset < StructSize);
+ const auto &STL = DL.getStructLayout(ST);
+ unsigned Element = STL->getElementContainingOffset(Offset);
+ Offset -= STL->getElementOffset(Element);
+ CurType = ST->getElementType(Element);
+ OnLiteralIndexing(CurType, Element);
+ } else {
+ // Vector type indexing should not use GEP, so if we still have a non-zero
+ // offset at this point, something is wrong. Give up.
+ return true;
+ }
+ } while (Offset > 0);
+
+ return false;
+}
+
+Instruction *
+SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
+ auto &DL = CurrF->getDataLayout();
+ IRBuilder<> B(GEP.getParent());
+ B.SetInsertPoint(&GEP);
+
+ std::vector<Value *> Indices;
+ Indices.push_back(ConstantInt::get(
+ IntegerType::getInt32Ty(CurrF->getContext()), 0, /* Signed= */ false));
+ walkLogicalAccessChain(
+ GEP,
+ [&Indices, &B](Type *EltType, uint64_t Index) {
+ Indices.push_back(
+ ConstantInt::get(B.getInt64Ty(), Index, /* Signed= */ false));
+ },
+ [&Indices, &B, &DL](Type *EltType, Value *Offset) {
+ uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
+ Value *Index = B.CreateUDiv(
+ Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
+ /* Signed= */ false));
+ Indices.push_back(Index);
+ });
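+  // E.g. for `getelementptr i8, ptr %p, i64 12` over struct S { float a; int b[4]; },
+  // Indices now holds {i32 0, i64 1, i64 2}.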
+
+ SmallVector<Type *, 2> Types = {GEP.getType(), GEP.getOperand(0)->getType()};
+ SmallVector<Value *, 4> Args;
+ Args.push_back(B.getInt1(GEP.isInBounds()));
+ Args.push_back(GEP.getOperand(0));
+ llvm::append_range(Args, Indices);
+ auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
+ replaceAllUsesWithAndErase(B, &GEP, NewI);
+ return NewI;
+}
+
+Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {
+
+ Type *CurType = GEP->getResultElementType();
+
+ bool Interrupted = walkLogicalAccessChain(
+ *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
+ [&CurType](Type *EltType, Value *Index) { CurType = EltType; });
+
+ return Interrupted ? GEP->getResultElementType() : CurType;
+}
+
+Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
+ if (Ref->getSourceElementType() ==
+ IntegerType::getInt8Ty(CurrF->getContext()) &&
+ TM->getSubtargetImpl()->isLogicalSPIRV()) {
+ return getGEPTypeLogical(Ref);
+ }
+
Type *Ty = nullptr;
// TODO: not sure if GetElementPtrInst::getTypeAtIndex() does anything
// useful here
@@ -1395,6 +1546,13 @@ Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
}
Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
+ if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) &&
+ TM->getSubtargetImpl()->isLogicalSPIRV()) {
+ Instruction *Result = buildLogicalAccessChainFromGEP(I);
+ if (Result)
+ return Result;
+ }
+
IRBuilder<> B(I.getParent());
B.SetInsertPoint(&I);
SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
@@ -1588,7 +1746,24 @@ void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
}
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
Value *Pointer = GEPI->getPointerOperand();
- Type *OpTy = GEPI->getSourceElementType();
+ Type *OpTy = nullptr;
+
+ // Knowing the accessed type is mandatory for logical SPIR-V. Sadly, the
+ // GEP source element type should not be used for this purpose, and the
+ // alternative type-scavenging method does not work reliably.
+ // Physical SPIR-V can work around this, but logical SPIR-V cannot, hence
+ // we still try to rely on the broken type scavenging for logical.
+ bool IsRewrittenGEP =
+ GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext());
+ if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) {
+ Value *Src = getPointerRoot(Pointer);
+ OpTy = GR->findDeducedElementType(Src);
+ }
+
+ // In all cases, fall back to the GEP type if type scavenging failed.
+ if (!OpTy)
+ OpTy = GEPI->getSourceElementType();
+
replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
if (isNestedPointer(OpTy))
insertTodoType(Pointer);
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
index 5cda6a0..7505507 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
@@ -74,17 +74,20 @@ class SPIRVLegalizePointerCast : public FunctionPass {
// Returns the loaded value.
Value *loadVectorFromVector(IRBuilder<> &B, FixedVectorType *SourceType,
FixedVectorType *TargetType, Value *Source) {
- // We expect the codegen to avoid doing implicit bitcast from a load.
- assert(TargetType->getElementType() == SourceType->getElementType());
- assert(TargetType->getNumElements() < SourceType->getNumElements());
-
+ assert(TargetType->getNumElements() <= SourceType->getNumElements());
LoadInst *NewLoad = B.CreateLoad(SourceType, Source);
buildAssignType(B, SourceType, NewLoad);
+ Value *AssignValue = NewLoad;
+ if (TargetType->getElementType() != SourceType->getElementType()) {
+ AssignValue = B.CreateIntrinsic(Intrinsic::spv_bitcast,
+ {TargetType, SourceType}, {NewLoad});
+ buildAssignType(B, TargetType, AssignValue);
+ }
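+    // The shuffle below then narrows to the target element count, e.g. a
+    // float4 source feeding a float3 destination keeps lanes {0, 1, 2}.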
SmallVector<int> Mask(/* Size= */ TargetType->getNumElements());
for (unsigned I = 0; I < TargetType->getNumElements(); ++I)
Mask[I] = I;
- Value *Output = B.CreateShuffleVector(NewLoad, NewLoad, Mask);
+ Value *Output = B.CreateShuffleVector(AssignValue, AssignValue, Mask);
buildAssignType(B, TargetType, Output);
return Output;
}
@@ -135,8 +138,9 @@ class SPIRVLegalizePointerCast : public FunctionPass {
Output = loadFirstValueFromAggregate(B, SVT->getElementType(),
OriginalOperand, LI);
}
- // Destination is a smaller vector than source.
+ // Destination is a smaller vector than the source, or a vector with a
+ // different element type.
// - float3 v3 = vector4;
+ // - float4 v4 = int4;
else if (SVT && DVT)
Output = loadVectorFromVector(B, SVT, DVT, OriginalOperand);
// Destination is the scalar type stored at the start of an aggregate.
diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
index 43bf6e9..60c4e2d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetTransformInfo.h
@@ -59,6 +59,8 @@ public:
Intrinsic::ID IID) const override;
Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
Value *NewV) const override;
+
+ bool allowVectorElementIndexingUsingGEP() const override { return false; }
};
} // namespace llvm
diff --git a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index d5f8492..b2cfd04 100644
--- a/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -165,7 +165,7 @@ void SystemZMCAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
unsigned BitSize = getFixupKindInfo(Kind).TargetSize;
unsigned Size = (BitSize + 7) / 8;
- assert(Offset + Size <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + Size <= F.getSize() && "Invalid fixup offset!");
// Big-endian insertion of Size bytes.
Value = extractBitsForFixup(Kind, Value, Fixup, getContext());
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index e30d723..fb0a47d 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -9044,7 +9044,7 @@ static unsigned detectEvenOddMultiplyOperand(const SelectionDAG &DAG,
if (unsigned(ShuffleMask[Elt]) != 2 * Elt)
CanUseEven = false;
if (unsigned(ShuffleMask[Elt]) != 2 * Elt + 1)
- CanUseEven = true;
+ CanUseOdd = false;
}
Op = Op.getOperand(0);
if (CanUseEven)
diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEAsmBackend.cpp b/llvm/lib/Target/VE/MCTargetDesc/VEAsmBackend.cpp
index f987621..b02b6af 100644
--- a/llvm/lib/Target/VE/MCTargetDesc/VEAsmBackend.cpp
+++ b/llvm/lib/Target/VE/MCTargetDesc/VEAsmBackend.cpp
@@ -174,7 +174,7 @@ void VEAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the bits
// from the fixup value. The Value has been "split up" into the
// appropriate bitfields above.
diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
index 837fd8e..84eb15f 100644
--- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
+++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
@@ -97,7 +97,7 @@ void WebAssemblyAsmBackend::applyFixup(const MCFragment &F,
Value <<= Info.TargetOffset;
unsigned Offset = Fixup.getOffset();
- assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+ assert(Offset + NumBytes <= F.getSize() && "Invalid fixup offset!");
// For each byte of the fragment that the fixup touches, mask in the
// bits from the fixup value.
diff --git a/llvm/lib/Target/WebAssembly/WebAssembly.td b/llvm/lib/Target/WebAssembly/WebAssembly.td
index a606209..089be5f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssembly.td
+++ b/llvm/lib/Target/WebAssembly/WebAssembly.td
@@ -49,6 +49,8 @@ def FeatureFP16 :
SubtargetFeature<"fp16", "HasFP16", "true",
"Enable FP16 instructions">;
+def FeatureGC : SubtargetFeature<"gc", "HasGC", "true", "Enable wasm gc">;
+
def FeatureMultiMemory :
SubtargetFeature<"multimemory", "HasMultiMemory", "true",
"Enable multiple memories">;
@@ -71,7 +73,6 @@ def FeatureReferenceTypes :
SubtargetFeature<"reference-types", "HasReferenceTypes", "true",
"Enable reference types">;
-def FeatureGC : SubtargetFeature<"gc", "HasGC", "true", "Enable wasm gc">;
def FeatureRelaxedSIMD :
SubtargetFeature<"relaxed-simd", "SIMDLevel", "RelaxedSIMD",
"Enable relaxed-simd instructions">;
@@ -139,10 +140,10 @@ def : ProcessorModel<"lime1", NoSchedModel,
def : ProcessorModel<"bleeding-edge", NoSchedModel,
[FeatureAtomics, FeatureBulkMemory, FeatureBulkMemoryOpt,
FeatureCallIndirectOverlong, FeatureExceptionHandling,
- FeatureExtendedConst, FeatureFP16, FeatureMultiMemory,
- FeatureMultivalue, FeatureMutableGlobals,
+ FeatureExtendedConst, FeatureFP16, FeatureGC,
+ FeatureMultiMemory, FeatureMultivalue, FeatureMutableGlobals,
FeatureNontrappingFPToInt, FeatureRelaxedSIMD,
- FeatureReferenceTypes, FeatureGC, FeatureSIMD128,
+ FeatureReferenceTypes, FeatureSIMD128,
FeatureSignExt, FeatureTailCall]>;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
index 2b632fd..13d048a 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
@@ -50,6 +50,9 @@ def HasFP16 :
Predicate<"Subtarget->hasFP16()">,
AssemblerPredicate<(all_of FeatureFP16), "fp16">;
+def HasGC : Predicate<"Subtarget->hasGC()">,
+ AssemblerPredicate<(all_of FeatureGC), "gc">;
+
def HasMultiMemory :
Predicate<"Subtarget->hasMultiMemory()">,
AssemblerPredicate<(all_of FeatureMultiMemory), "multimemory">;
@@ -76,9 +79,6 @@ def HasReferenceTypes :
Predicate<"Subtarget->hasReferenceTypes()">,
AssemblerPredicate<(all_of FeatureReferenceTypes), "reference-types">;
-def HasGC : Predicate<"Subtarget->hasGC()">,
- AssemblerPredicate<(all_of FeatureGC), "gc">;
-
def HasRelaxedSIMD :
Predicate<"Subtarget->hasRelaxedSIMD()">,
AssemblerPredicate<(all_of FeatureRelaxedSIMD), "relaxed-simd">;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
index f814274..2f88bbb 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -46,12 +46,12 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
bool HasExceptionHandling = false;
bool HasExtendedConst = false;
bool HasFP16 = false;
+ bool HasGC = false;
bool HasMultiMemory = false;
bool HasMultivalue = false;
bool HasMutableGlobals = false;
bool HasNontrappingFPToInt = false;
bool HasReferenceTypes = false;
- bool HasGC = false;
bool HasSignExt = false;
bool HasTailCall = false;
bool HasWideArithmetic = false;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 7f9d474..1efef83 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -690,7 +690,7 @@ void X86AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
return;
unsigned Size = getFixupKindSize(Kind);
- assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
+ assert(Fixup.getOffset() + Size <= F.getSize() && "Invalid fixup offset!");
int64_t SignedValue = static_cast<int64_t>(Value);
if (IsResolved && Fixup.isPCRel()) {
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 0ff7f23..067bd43 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3673,6 +3673,12 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
CLI.NumResultRegs = RVLocs.size();
CLI.Call = MIB;
+ // Add call site info for call graph section.
+ if (TM.Options.EmitCallGraphSection && CB && CB->isIndirectCall()) {
+ MachineFunction::CallSiteInfo CSInfo(*CB);
+ MF->addCallSiteInfo(CLI.Call, std::move(CSInfo));
+ }
+
return true;
}
@@ -4042,6 +4048,8 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
MO.setReg(IndexReg);
}
+ if (MI->isCall())
+ FuncInfo.MF->moveAdditionalCallInfo(MI, Result);
Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
MachineBasicBlock::iterator I(MI);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7244a6d..bbbb1d9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -58071,12 +58071,24 @@ static SDValue combineX86CloadCstore(SDNode *N, SelectionDAG &DAG) {
Ops[3] = Op1.getOperand(0);
Ops[4] = Op1.getOperand(1);
} else if (Op1.getOpcode() == ISD::AND && Sub.getValue(0).use_empty()) {
+ SDValue Src = Op1;
+ SDValue Op10 = Op1.getOperand(0);
+ if (Op10.getOpcode() == ISD::XOR && isAllOnesConstant(Op10.getOperand(1))) {
+ // res, flags2 = sub 0, (and (xor X, -1), Y)
+ // cload/cstore ..., cond_ne, flag2
+ // ->
+ // res, flags2 = sub 0, (and X, Y)
+ // cload/cstore ..., cond_e, flag2
+ Src = DAG.getNode(ISD::AND, DL, Op1.getValueType(), Op10.getOperand(0),
+ Op1.getOperand(1));
+ Ops[3] = DAG.getTargetConstant(X86::COND_E, DL, MVT::i8);
+ }
// res, flags2 = sub 0, (and X, Y)
- // cload/cstore ..., cond_ne, flag2
+ // cload/cstore ..., cc, flag2
// ->
// res, flags2 = cmp (and X, Y), 0
- // cload/cstore ..., cond_ne, flag2
- Ops[4] = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Op1, Sub.getOperand(0));
+ // cload/cstore ..., cc, flag2
+ Ops[4] = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Src, Sub.getOperand(0));
} else {
return SDValue();
}
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index b4639ac..5862c7e 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -2060,6 +2060,10 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (CallConv == CallingConv::X86_INTR)
report_fatal_error("X86 interrupts may not be called directly");
+ // Set type id for call site info.
+ if (MF.getTarget().Options.EmitCallGraphSection && CB && CB->isIndirectCall())
+ CSInfo = MachineFunction::CallSiteInfo(*CB);
+
if (IsIndirectCall && !IsWin64 &&
M->getModuleFlag("import-call-optimization"))
errorUnsupported(DAG, dl,
diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp
index 78bd5b4..7e09d30 100644
--- a/llvm/lib/TargetParser/Host.cpp
+++ b/llvm/lib/TargetParser/Host.cpp
@@ -587,8 +587,9 @@ StringRef sys::detail::getHostCPUNameForBPF() {
#endif
}
-#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
- defined(_M_X64)
+#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
/// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in
/// the specified arguments. If we can't run cpuid on the host, return true.
@@ -1853,8 +1854,9 @@ VendorSignatures getVendorSignature(unsigned *MaxLeaf) {
} // namespace llvm
#endif
-#if defined(__i386__) || defined(_M_IX86) || \
- defined(__x86_64__) || defined(_M_X64)
+#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
StringMap<bool> sys::getHostCPUFeatures() {
unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
unsigned MaxLevel;
@@ -2147,7 +2149,8 @@ StringMap<bool> sys::getHostCPUFeatures() {
return Features;
}
-#elif defined(_WIN32) && (defined(__aarch64__) || defined(_M_ARM64))
+#elif defined(_WIN32) && (defined(__aarch64__) || defined(_M_ARM64) || \
+ defined(__arm64ec__) || defined(_M_ARM64EC))
StringMap<bool> sys::getHostCPUFeatures() {
StringMap<bool> Features;
diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp
index e5c896f..126be71 100644
--- a/llvm/lib/TargetParser/TargetParser.cpp
+++ b/llvm/lib/TargetParser/TargetParser.cpp
@@ -446,6 +446,7 @@ void AMDGPU::fillAMDGPUFeatureMap(StringRef GPU, const Triple &T,
Features["tanh-insts"] = true;
Features["transpose-load-f4f6-insts"] = true;
Features["bf16-trans-insts"] = true;
+ Features["bf16-cvt-insts"] = true;
Features["fp8-conversion-insts"] = true;
Features["fp8e5m3-insts"] = true;
Features["permlane16-swap"] = true;
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 7af5ba4..40a7f80 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -458,29 +458,19 @@ static bool foldSqrt(CallInst *Call, LibFunc Func, TargetTransformInfo &TTI,
// Check if this array of constants represents a cttz table.
// Iterate over the elements from \p Table by trying to find/match all
// the numbers from 0 to \p InputBits that should represent cttz results.
-static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
- uint64_t Shift, uint64_t InputBits) {
- unsigned Length = Table.getNumElements();
- if (Length < InputBits || Length > InputBits * 2)
- return false;
-
- APInt Mask = APInt::getBitsSetFrom(InputBits, Shift);
- unsigned Matched = 0;
-
- for (unsigned i = 0; i < Length; i++) {
- uint64_t Element = Table.getElementAsInteger(i);
- if (Element >= InputBits)
- continue;
-
- // Check if \p Element matches a concrete answer. It could fail for some
- // elements that are never accessed, so we keep iterating over each element
- // from the table. The number of matched elements should be equal to the
- // number of potential right answers which is \p InputBits actually.
- if ((((Mul << Element) & Mask.getZExtValue()) >> Shift) == i)
- Matched++;
+static bool isCTTZTable(Constant *Table, const APInt &Mul, const APInt &Shift,
+ const APInt &AndMask, Type *AccessTy,
+ unsigned InputBits, const APInt &GEPIdxFactor,
+ const DataLayout &DL) {
+ for (unsigned Idx = 0; Idx < InputBits; Idx++) {
+ APInt Index = (APInt(InputBits, 1).shl(Idx) * Mul).lshr(Shift) & AndMask;
+ ConstantInt *C = dyn_cast_or_null<ConstantInt>(
+ ConstantFoldLoadFromConst(Table, AccessTy, Index * GEPIdxFactor, DL));
+ if (!C || C->getValue() != Idx)
+ return false;
}
- return Matched == InputBits;
+ return true;
}
// Try to recognize table-based ctz implementation.
@@ -495,6 +485,11 @@ static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
// this can be lowered to `cttz` instruction.
// There is also a special case when the element is 0.
//
+// The (x & -x) operation isolates the lowest set bit of x. The multiplier is
+// a de Bruijn sequence that contains every bit pattern of the index width as
+// a substring. The shift extracts the top bits after the multiply, and that
+// index into the table gives the number of trailing zeros in the original
+// number.
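+// For example, for 32-bit inputs the classic constants are Mul = 0x077CB531
+// and Shift = 27: (x & -x) == (1 << n) for some n, the multiply shifts the
+// de Bruijn sequence left by n so the top five bits uniquely identify n, and
+// the table is constructed so that table[(0x077CB531 << n) >> 27] == n.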
+//
// Here are some examples or LLVM IR for a 64-bit target:
//
// CASE 1:
@@ -536,8 +531,8 @@ static bool isCTTZTable(const ConstantDataArray &Table, uint64_t Mul,
// i64 %shr
// %0 = load i8, i8* %arrayidx, align 1, !tbaa !8
//
-// All this can be lowered to @llvm.cttz.i32/64 intrinsic.
-static bool tryToRecognizeTableBasedCttz(Instruction &I) {
+// All these can be lowered to @llvm.cttz.i32/64 intrinsics.
+static bool tryToRecognizeTableBasedCttz(Instruction &I, const DataLayout &DL) {
LoadInst *LI = dyn_cast<LoadInst>(&I);
if (!LI)
return false;
@@ -547,53 +542,47 @@ static bool tryToRecognizeTableBasedCttz(Instruction &I) {
return false;
GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getPointerOperand());
- if (!GEP || !GEP->hasNoUnsignedSignedWrap() || GEP->getNumIndices() != 2)
- return false;
-
- if (!GEP->getSourceElementType()->isArrayTy())
- return false;
-
- uint64_t ArraySize = GEP->getSourceElementType()->getArrayNumElements();
- if (ArraySize != 32 && ArraySize != 64)
+ if (!GEP || !GEP->hasNoUnsignedSignedWrap())
return false;
GlobalVariable *GVTable = dyn_cast<GlobalVariable>(GEP->getPointerOperand());
if (!GVTable || !GVTable->hasInitializer() || !GVTable->isConstant())
return false;
- ConstantDataArray *ConstData =
- dyn_cast<ConstantDataArray>(GVTable->getInitializer());
- if (!ConstData)
- return false;
-
- if (!match(GEP->idx_begin()->get(), m_ZeroInt()))
+ unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
+ APInt ModOffset(BW, 0);
+ SmallMapVector<Value *, APInt, 4> VarOffsets;
+ if (!GEP->collectOffset(DL, BW, VarOffsets, ModOffset) ||
+ VarOffsets.size() != 1 || ModOffset != 0)
return false;
+ auto [GepIdx, GEPScale] = VarOffsets.front();
- Value *Idx2 = std::next(GEP->idx_begin())->get();
Value *X1;
- uint64_t MulConst, ShiftConst;
- // FIXME: 64-bit targets have `i64` type for the GEP index, so this match will
- // probably fail for other (e.g. 32-bit) targets.
- if (!match(Idx2, m_ZExtOrSelf(
- m_LShr(m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)),
- m_ConstantInt(MulConst)),
- m_ConstantInt(ShiftConst)))))
+ const APInt *MulConst, *ShiftConst, *AndCst = nullptr;
+ // Check that the gep variable index is ((x & -x) * MulConst) >> ShiftConst.
+ // This might be extended to the pointer index type, and if the gep index type
+ // has been replaced with an i8 then a new And (and different ShiftConst) will
+ // be present.
+ auto MatchInner = m_LShr(
+ m_Mul(m_c_And(m_Neg(m_Value(X1)), m_Deferred(X1)), m_APInt(MulConst)),
+ m_APInt(ShiftConst));
+ if (!match(GepIdx, m_CastOrSelf(MatchInner)) &&
+ !match(GepIdx, m_CastOrSelf(m_And(MatchInner, m_APInt(AndCst)))))
return false;
unsigned InputBits = X1->getType()->getScalarSizeInBits();
- if (InputBits != 32 && InputBits != 64)
- return false;
-
- // Shift should extract top 5..7 bits.
- if (InputBits - Log2_32(InputBits) != ShiftConst &&
- InputBits - Log2_32(InputBits) - 1 != ShiftConst)
+ if (InputBits != 16 && InputBits != 32 && InputBits != 64 && InputBits != 128)
return false;
- if (!isCTTZTable(*ConstData, MulConst, ShiftConst, InputBits))
+ if (!GEPScale.isIntN(InputBits) ||
+ !isCTTZTable(GVTable->getInitializer(), *MulConst, *ShiftConst,
+ AndCst ? *AndCst : APInt::getAllOnes(InputBits), AccessType,
+ InputBits, GEPScale.zextOrTrunc(InputBits), DL))
return false;
- auto ZeroTableElem = ConstData->getElementAsInteger(0);
- bool DefinedForZero = ZeroTableElem == InputBits;
+ ConstantInt *ZeroTableElem = cast<ConstantInt>(
+ ConstantFoldLoadFromConst(GVTable->getInitializer(), AccessType, DL));
+ bool DefinedForZero = ZeroTableElem->getZExtValue() == InputBits;
IRBuilder<> B(LI);
ConstantInt *BoolConst = B.getInt1(!DefinedForZero);
@@ -607,8 +596,7 @@ static bool tryToRecognizeTableBasedCttz(Instruction &I) {
// If the value in elem 0 isn't the same as InputBits, we still want to
// produce the value from the table.
auto Cmp = B.CreateICmpEQ(X1, ConstantInt::get(XType, 0));
- auto Select =
- B.CreateSelect(Cmp, ConstantInt::get(XType, ZeroTableElem), Cttz);
+ auto Select = B.CreateSelect(Cmp, B.CreateZExt(ZeroTableElem, XType), Cttz);
// NOTE: If the table[0] is 0, but the cttz(0) is defined by the Target
// it should be handled as: `cttz(x) & (typeSize - 1)`.
@@ -1477,7 +1465,7 @@ static bool foldUnusualPatterns(Function &F, DominatorTree &DT,
MadeChange |= foldGuardedFunnelShift(I, DT);
MadeChange |= tryToRecognizePopCount(I);
MadeChange |= tryToFPToSat(I, TTI);
- MadeChange |= tryToRecognizeTableBasedCttz(I);
+ MadeChange |= tryToRecognizeTableBasedCttz(I, DL);
MadeChange |= foldConsecutiveLoads(I, DL, TTI, AA, DT);
MadeChange |= foldPatternedLoads(I, DL);
MadeChange |= foldICmpOrChain(I, DL, TTI, AA, DT);
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 3c24d2e..01da012 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -13404,7 +13404,7 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
return indicatePessimisticFixpoint();
if (BinSize == 0) {
- auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false));
+ auto NewAllocationSize = std::make_optional<TypeSize>(0, false);
if (!changeAllocationSize(NewAllocationSize))
return ChangeStatus::UNCHANGED;
return ChangeStatus::CHANGED;
@@ -13422,8 +13422,7 @@ struct AAAllocationInfoImpl : public AAAllocationInfo {
if (SizeOfBin >= *AllocationSize)
return indicatePessimisticFixpoint();
- auto NewAllocationSize =
- std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false));
+ auto NewAllocationSize = std::make_optional<TypeSize>(SizeOfBin * 8, false);
if (!changeAllocationSize(NewAllocationSize))
return ChangeStatus::UNCHANGED;
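
The std::make_optional spelling constructs the TypeSize in place from its
constructor arguments. A stand-alone illustration (TypeSizeLike is a
hypothetical stand-in for llvm::TypeSize):

  #include <cstdint>
  #include <optional>

  struct TypeSizeLike {
    TypeSizeLike(uint64_t Q, bool S) : Quantity(Q), Scalable(S) {}
    uint64_t Quantity;
    bool Scalable;
  };

  // Old spelling: wrap an already-constructed temporary.
  auto A = std::optional<TypeSizeLike>(TypeSizeLike(0, false));
  // New spelling: forward the arguments, constructing in place.
  auto B = std::make_optional<TypeSizeLike>(0, false);
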
diff --git a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
index 486205c..57844a1 100644
--- a/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
+++ b/llvm/lib/Transforms/IPO/LowerTypeTests.cpp
@@ -502,8 +502,7 @@ class LowerTypeTestsModule {
uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
TypeIdLowering importTypeId(StringRef TypeId);
void importTypeTest(CallInst *CI);
- void importFunction(Function *F, bool isJumpTableCanonical,
- std::vector<GlobalAlias *> &AliasesToErase);
+ void importFunction(Function *F, bool isJumpTableCanonical);
BitSetInfo
buildBitSet(Metadata *TypeId,
@@ -1103,9 +1102,8 @@ void LowerTypeTestsModule::maybeReplaceComdat(Function *F,
// ThinLTO backend: the function F has a jump table entry; update this module
// accordingly. isJumpTableCanonical describes the type of the jump table entry.
-void LowerTypeTestsModule::importFunction(
- Function *F, bool isJumpTableCanonical,
- std::vector<GlobalAlias *> &AliasesToErase) {
+void LowerTypeTestsModule::importFunction(Function *F,
+ bool isJumpTableCanonical) {
assert(F->getType()->getAddressSpace() == 0);
GlobalValue::VisibilityTypes Visibility = F->getVisibility();
@@ -1135,23 +1133,23 @@ void LowerTypeTestsModule::importFunction(
} else {
F->setName(Name + ".cfi");
maybeReplaceComdat(F, Name);
- F->setLinkage(GlobalValue::ExternalLinkage);
FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
F->getAddressSpace(), Name, &M);
FDecl->setVisibility(Visibility);
Visibility = GlobalValue::HiddenVisibility;
- // Delete aliases pointing to this function, they'll be re-created in the
- // merged output. Don't do it yet though because ScopedSaveAliaseesAndUsed
- // will want to reset the aliasees first.
+    // Update aliases pointing to this function to also include the ".cfi"
+    // suffix. We expect the jump table entry to either point to the real
+    // function or an alias. Redirect all other users to the jump table entry.
for (auto &U : F->uses()) {
if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
+ std::string AliasName = A->getName().str() + ".cfi";
Function *AliasDecl = Function::Create(
F->getFunctionType(), GlobalValue::ExternalLinkage,
F->getAddressSpace(), "", &M);
AliasDecl->takeName(A);
A->replaceAllUsesWith(AliasDecl);
- AliasesToErase.push_back(A);
+ A->setName(AliasName);
}
}
}
@@ -2077,16 +2075,13 @@ bool LowerTypeTestsModule::lower() {
Decls.push_back(&F);
}
- std::vector<GlobalAlias *> AliasesToErase;
{
ScopedSaveAliaseesAndUsed S(M);
for (auto *F : Defs)
- importFunction(F, /*isJumpTableCanonical*/ true, AliasesToErase);
+ importFunction(F, /*isJumpTableCanonical*/ true);
for (auto *F : Decls)
- importFunction(F, /*isJumpTableCanonical*/ false, AliasesToErase);
+ importFunction(F, /*isJumpTableCanonical*/ false);
}
- for (GlobalAlias *GA : AliasesToErase)
- GA->eraseFromParent();
return true;
}
@@ -2137,6 +2132,18 @@ bool LowerTypeTestsModule::lower() {
if (auto Alias = dyn_cast<AliasSummary>(RefGVS.get()))
AddressTaken.insert(Alias->getAliaseeGUID());
}
+ auto IsAddressTaken = [&](GlobalValue::GUID GUID) {
+ if (AddressTaken.count(GUID))
+ return true;
+ auto VI = ExportSummary->getValueInfo(GUID);
+ if (!VI)
+ return false;
+ for (auto &I : VI.getSummaryList())
+ if (auto Alias = dyn_cast<AliasSummary>(I.get()))
+ if (AddressTaken.count(Alias->getAliaseeGUID()))
+ return true;
+ return false;
+ };
for (auto *FuncMD : CfiFunctionsMD->operands()) {
assert(FuncMD->getNumOperands() >= 2);
StringRef FunctionName =
@@ -2153,7 +2160,7 @@ bool LowerTypeTestsModule::lower() {
// have no live references (and are not exported with cross-DSO CFI.)
if (!ExportSummary->isGUIDLive(GUID))
continue;
- if (!AddressTaken.count(GUID)) {
+ if (!IsAddressTaken(GUID)) {
if (!CrossDsoCfi || Linkage != CFL_Definition)
continue;
@@ -2227,6 +2234,43 @@ bool LowerTypeTestsModule::lower() {
}
}
+ struct AliasToCreate {
+ Function *Alias;
+ std::string TargetName;
+ };
+ std::vector<AliasToCreate> AliasesToCreate;
+
+ // Parse alias data to replace stand-in function declarations for aliases
+ // with an alias to the intended target.
+ if (ExportSummary) {
+ if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
+ for (auto *AliasMD : AliasesMD->operands()) {
+ SmallVector<Function *> Aliases;
+ for (Metadata *MD : AliasMD->operands()) {
+ auto *MDS = dyn_cast<MDString>(MD);
+ if (!MDS)
+ continue;
+ StringRef AliasName = MDS->getString();
+ if (!ExportedFunctions.count(AliasName))
+ continue;
+ auto *AliasF = M.getFunction(AliasName);
+ if (AliasF)
+ Aliases.push_back(AliasF);
+ }
+
+ if (Aliases.empty())
+ continue;
+
+ for (unsigned I = 1; I != Aliases.size(); ++I) {
+ auto *AliasF = Aliases[I];
+ ExportedFunctions.erase(AliasF->getName());
+ AliasesToCreate.push_back(
+ {AliasF, std::string(Aliases[0]->getName())});
+ }
+ }
+ }
+ }
+
DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
for (GlobalObject &GO : M.global_objects()) {
if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
@@ -2414,47 +2458,16 @@ bool LowerTypeTestsModule::lower() {
allocateByteArrays();
- // Parse alias data to replace stand-in function declarations for aliases
- // with an alias to the intended target.
- if (ExportSummary) {
- if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
- for (auto *AliasMD : AliasesMD->operands()) {
- assert(AliasMD->getNumOperands() >= 4);
- StringRef AliasName =
- cast<MDString>(AliasMD->getOperand(0))->getString();
- StringRef Aliasee = cast<MDString>(AliasMD->getOperand(1))->getString();
-
- if (auto It = ExportedFunctions.find(Aliasee);
- It == ExportedFunctions.end() ||
- It->second.Linkage != CFL_Definition || !M.getNamedAlias(Aliasee))
- continue;
-
- GlobalValue::VisibilityTypes Visibility =
- static_cast<GlobalValue::VisibilityTypes>(
- cast<ConstantAsMetadata>(AliasMD->getOperand(2))
- ->getValue()
- ->getUniqueInteger()
- .getZExtValue());
- bool Weak =
- static_cast<bool>(cast<ConstantAsMetadata>(AliasMD->getOperand(3))
- ->getValue()
- ->getUniqueInteger()
- .getZExtValue());
-
- auto *Alias = GlobalAlias::create("", M.getNamedAlias(Aliasee));
- Alias->setVisibility(Visibility);
- if (Weak)
- Alias->setLinkage(GlobalValue::WeakAnyLinkage);
-
- if (auto *F = M.getFunction(AliasName)) {
- Alias->takeName(F);
- F->replaceAllUsesWith(Alias);
- F->eraseFromParent();
- } else {
- Alias->setName(AliasName);
- }
- }
- }
+ for (auto A : AliasesToCreate) {
+ auto *Target = M.getNamedValue(A.TargetName);
+ if (!isa<GlobalAlias>(Target))
+ continue;
+ auto *AliasGA = GlobalAlias::create("", Target);
+ AliasGA->setVisibility(A.Alias->getVisibility());
+ AliasGA->setLinkage(A.Alias->getLinkage());
+ AliasGA->takeName(A.Alias);
+ A.Alias->replaceAllUsesWith(AliasGA);
+ A.Alias->eraseFromParent();
}
// Emit .symver directives for exported functions, if they exist.
diff --git a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
index e276376..4387c38 100644
--- a/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
+++ b/llvm/lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp
@@ -384,6 +384,10 @@ void splitAndWriteThinLTOBitcode(
for (auto &F : M)
if ((!F.hasLocalLinkage() || F.hasAddressTaken()) && HasTypeMetadata(&F))
CfiFunctions.insert(&F);
+ for (auto &A : M.aliases())
+ if (auto *F = dyn_cast<Function>(A.getAliasee()))
+ if (HasTypeMetadata(F))
+ CfiFunctions.insert(&A);
// Remove all globals with type metadata, globals with comdats that live in
// MergedM, and aliases pointing to such globals from the thin LTO module.
@@ -403,12 +407,12 @@ void splitAndWriteThinLTOBitcode(
auto &Ctx = MergedM->getContext();
SmallVector<MDNode *, 8> CfiFunctionMDs;
for (auto *V : CfiFunctions) {
- Function &F = *cast<Function>(V);
+ Function &F = *cast<Function>(V->getAliaseeObject());
SmallVector<MDNode *, 2> Types;
F.getMetadata(LLVMContext::MD_type, Types);
SmallVector<Metadata *, 4> Elts;
- Elts.push_back(MDString::get(Ctx, F.getName()));
+ Elts.push_back(MDString::get(Ctx, V->getName()));
CfiFunctionLinkage Linkage;
if (lowertypetests::isJumpTableCanonical(&F))
Linkage = CFL_Definition;
@@ -428,29 +432,24 @@ void splitAndWriteThinLTOBitcode(
NMD->addOperand(MD);
}
- SmallVector<MDNode *, 8> FunctionAliases;
+ MapVector<Function *, std::vector<GlobalAlias *>> FunctionAliases;
for (auto &A : M.aliases()) {
if (!isa<Function>(A.getAliasee()))
continue;
auto *F = cast<Function>(A.getAliasee());
-
- Metadata *Elts[] = {
- MDString::get(Ctx, A.getName()),
- MDString::get(Ctx, F->getName()),
- ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt8Ty(Ctx), A.getVisibility())),
- ConstantAsMetadata::get(
- ConstantInt::get(Type::getInt8Ty(Ctx), A.isWeakForLinker())),
- };
-
- FunctionAliases.push_back(MDTuple::get(Ctx, Elts));
+ FunctionAliases[F].push_back(&A);
}
if (!FunctionAliases.empty()) {
NamedMDNode *NMD = MergedM->getOrInsertNamedMetadata("aliases");
- for (auto *MD : FunctionAliases)
- NMD->addOperand(MD);
+ for (auto &Alias : FunctionAliases) {
+ SmallVector<Metadata *> Elts;
+ Elts.push_back(MDString::get(Ctx, Alias.first->getName()));
+ for (auto *A : Alias.second)
+ Elts.push_back(MDString::get(Ctx, A->getName()));
+ NMD->addOperand(MDTuple::get(Ctx, Elts));
+ }
}
SmallVector<MDNode *, 8> Symvers;
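
The net effect on the emitted module is a reshaped "aliases" node: the old
form carried one tuple per alias (alias name, aliasee, visibility, weak),
while the new form emits one tuple per aliasee listing the target first and
its alias names after it; visibility and linkage are later recovered from the
stand-in declarations in LowerTypeTests. A sketch of the producer side,
assuming the usual LLVM headers (emitAliasesMD is a hypothetical helper):

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/ADT/StringRef.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Metadata.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  void emitAliasesMD(Module &M, StringRef Aliasee,
                     ArrayRef<StringRef> AliasNames) {
    LLVMContext &Ctx = M.getContext();
    SmallVector<Metadata *> Elts;
    Elts.push_back(MDString::get(Ctx, Aliasee)); // operand 0: the target
    for (StringRef A : AliasNames)               // operands 1..N: its aliases
      Elts.push_back(MDString::get(Ctx, A));
    M.getOrInsertNamedMetadata("aliases")->addOperand(MDTuple::get(Ctx, Elts));
  }
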
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 1b78ace..47e017e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3891,16 +3891,20 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
}
}
- // Try to fold intrinsic into select operands. This is legal if:
+ // Try to fold intrinsic into select/phi operands. This is legal if:
// * The intrinsic is speculatable.
// * The select condition is not a vector, or the intrinsic does not
// perform cross-lane operations.
if (isSafeToSpeculativelyExecuteWithVariableReplaced(&CI) &&
isNotCrossLaneOperation(II))
- for (Value *Op : II->args())
+ for (Value *Op : II->args()) {
if (auto *Sel = dyn_cast<SelectInst>(Op))
if (Instruction *R = FoldOpIntoSelect(*II, Sel))
return R;
+ if (auto *Phi = dyn_cast<PHINode>(Op))
+ if (Instruction *R = foldOpIntoPhi(*II, Phi))
+ return R;
+ }
if (Instruction *Shuf = foldShuffledIntrinsicOperands(II))
return Shuf;
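
A source-level analog of the new PHI case, with umax standing in for any
speculatable, non-cross-lane intrinsic (the fold still goes through
foldOpIntoPhi's usual profitability checks, e.g. mostly-constant incoming
values):

  #include <algorithm>
  #include <cstdint>

  uint32_t before(bool C, uint32_t X) {
    uint32_t P = C ? 7u : X;  // %p = phi i32 [ 7, %bb0 ], [ %x, %bb1 ]
    return std::max(P, 5u);   // %r = call i32 @llvm.umax.i32(i32 %p, i32 5)
  }

  uint32_t after(bool C, uint32_t X) {
    // umax is folded into each incoming value; umax(7, 5) constant-folds.
    return C ? 7u : std::max(X, 5u);
  }
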
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 00b877b..fe0f308 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -462,6 +462,13 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
return ScalarPHI;
}
+  // If SrcVec is a subvector starting at index 0, extract from the wider
+  // source vector.
+ Value *V;
+ if (match(SrcVec,
+ m_Intrinsic<Intrinsic::vector_extract>(m_Value(V), m_Zero())))
+ return ExtractElementInst::Create(V, Index);
+
// TODO come up with a n-ary matcher that subsumes both unary and
// binary matchers.
UnaryOperator *UO;
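
The new fold in miniature, modeled with fixed-size arrays (the index is
assumed in range for the subvector, as extractelement requires):

  #include <array>

  // %sub = llvm.vector.extract of <4 x float> from <8 x float> %wide, idx 0
  // %e   = extractelement <4 x float> %sub, i32 %i
  float before(const std::array<float, 8> &Wide, unsigned I) {
    std::array<float, 4> Sub = {Wide[0], Wide[1], Wide[2], Wide[3]};
    return Sub[I];
  }

  // Folds to: %e = extractelement <8 x float> %wide, i32 %i
  float after(const std::array<float, 8> &Wide, unsigned I) {
    return Wide[I]; // same lane, read from the wider source
  }
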
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 9e33320..5ee3bb1 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1994,6 +1994,8 @@ Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
}
Clone = InsertNewInstBefore(Clone, OpBB->getTerminator()->getIterator());
Clones.insert({OpBB, Clone});
+ // We may have speculated the instruction.
+ Clone->dropUBImplyingAttrsAndMetadata();
}
NewPhiValues[OpIndex] = Clone;
@@ -2009,12 +2011,17 @@ Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN,
NewPN->addIncoming(NewPhiValues[i], PN->getIncomingBlock(i));
if (IdenticalUsers) {
- for (User *U : make_early_inc_range(PN->users())) {
+ // Collect and deduplicate users up-front to avoid iterator invalidation.
+ SmallSetVector<Instruction *, 4> ToReplace;
+ for (User *U : PN->users()) {
Instruction *User = cast<Instruction>(U);
if (User == &I)
continue;
- replaceInstUsesWith(*User, NewPN);
- eraseInstFromFunction(*User);
+ ToReplace.insert(User);
+ }
+ for (Instruction *I : ToReplace) {
+ replaceInstUsesWith(*I, NewPN);
+ eraseInstFromFunction(*I);
}
OneUse = true;
}
@@ -2652,9 +2659,18 @@ static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
APInt NewOffset = TypeSize * *C2 + *C1;
if (NewOffset.isZero() ||
(Src->hasOneUse() && GEP.getOperand(1)->hasOneUse())) {
+ GEPNoWrapFlags Flags = GEPNoWrapFlags::none();
+ if (GEP.hasNoUnsignedWrap() &&
+ cast<GEPOperator>(Src)->hasNoUnsignedWrap() &&
+ match(GEP.getOperand(1), m_NUWAddLike(m_Value(), m_Value()))) {
+ Flags |= GEPNoWrapFlags::noUnsignedWrap();
+ if (GEP.isInBounds() && cast<GEPOperator>(Src)->isInBounds())
+ Flags |= GEPNoWrapFlags::inBounds();
+ }
+
Value *GEPConst =
- IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset));
- return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex);
+ IC.Builder.CreatePtrAdd(Base, IC.Builder.getInt(NewOffset), "", Flags);
+ return GetElementPtrInst::Create(BaseType, GEPConst, VarIndex, Flags);
}
return nullptr;
@@ -3182,7 +3198,16 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious.
- *I = Builder.CreateIntCast(*I, NewIndexType, true);
+ if (IndexTy->getScalarSizeInBits() <
+ NewIndexType->getScalarSizeInBits()) {
+ if (GEP.hasNoUnsignedWrap() && GEP.hasNoUnsignedSignedWrap())
+ *I = Builder.CreateZExt(*I, NewIndexType, "", /*IsNonNeg=*/true);
+ else
+ *I = Builder.CreateSExt(*I, NewIndexType);
+ } else {
+ *I = Builder.CreateTrunc(*I, NewIndexType, "", GEP.hasNoUnsignedWrap(),
+ GEP.hasNoUnsignedSignedWrap());
+ }
MadeChange = true;
}
}
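
The cast selection above, restated as a sketch built on the same IRBuilder
calls (castGEPIndex is a hypothetical wrapper; it assumes the widths differ,
as in the caller):

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  // NUW/NUSW are the GEP's no-unsigned-wrap / no-unsigned-signed-wrap flags.
  Value *castGEPIndex(IRBuilder<> &B, Value *Idx, Type *NewIndexType,
                      bool NUW, bool NUSW) {
    unsigned From = Idx->getType()->getScalarSizeInBits();
    unsigned To = NewIndexType->getScalarSizeInBits();
    if (From < To)
      // Per the reasoning in this change, nuw + nusw imply a non-negative
      // index, so zext nneg widens it exactly; otherwise fall back to sext.
      return NUW && NUSW
                 ? B.CreateZExt(Idx, NewIndexType, "", /*IsNonNeg=*/true)
                 : B.CreateSExt(Idx, NewIndexType);
    // Narrowing: carry the wrap facts onto the trunc.
    return B.CreateTrunc(Idx, NewIndexType, "", NUW, NUSW);
  }
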
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 4e5a8d1..bcb90d6 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -160,6 +160,16 @@ static cl::opt<bool> ClGenerateTagsWithCalls(
static cl::opt<bool> ClGlobals("hwasan-globals", cl::desc("Instrument globals"),
cl::Hidden, cl::init(false));
+static cl::opt<bool> ClAllGlobals(
+ "hwasan-all-globals",
+ cl::desc(
+ "Instrument globals, even those within user-defined sections. Warning: "
+ "This may break existing code which walks globals via linker-generated "
+ "symbols, expects certain globals to be contiguous with each other, or "
+ "makes other assumptions which are invalidated by HWASan "
+ "instrumentation."),
+ cl::Hidden, cl::init(false));
+
static cl::opt<int> ClMatchAllTag(
"hwasan-match-all-tag",
cl::desc("don't report bad accesses via pointers with this tag"),
@@ -681,11 +691,11 @@ void HWAddressSanitizer::initializeModule() {
!CompileKernel && !UsePageAliases && optOr(ClGlobals, NewRuntime);
if (!CompileKernel) {
- createHwasanCtorComdat();
-
if (InstrumentGlobals)
instrumentGlobals();
+ createHwasanCtorComdat();
+
bool InstrumentPersonalityFunctions =
optOr(ClInstrumentPersonalityFunctions, NewRuntime);
if (InstrumentPersonalityFunctions)
@@ -1772,11 +1782,17 @@ void HWAddressSanitizer::instrumentGlobals() {
if (GV.hasCommonLinkage())
continue;
- // Globals with custom sections may be used in __start_/__stop_ enumeration,
- // which would be broken both by adding tags and potentially by the extra
- // padding/alignment that we insert.
- if (GV.hasSection())
- continue;
+ if (ClAllGlobals) {
+ // Avoid instrumenting intrinsic global variables.
+ if (GV.getSection() == "llvm.metadata")
+ continue;
+ } else {
+ // Globals with custom sections may be used in __start_/__stop_
+ // enumeration, which would be broken both by adding tags and potentially
+ // by the extra padding/alignment that we insert.
+ if (GV.hasSection())
+ continue;
+ }
Globals.push_back(&GV);
}
diff --git a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
index e706a6f..deff79b 100644
--- a/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp
@@ -237,8 +237,7 @@ class InductiveRangeCheckElimination {
DominatorTree &DT;
LoopInfo &LI;
- using GetBFIFunc =
- std::optional<llvm::function_ref<llvm::BlockFrequencyInfo &()>>;
+ using GetBFIFunc = llvm::function_ref<llvm::BlockFrequencyInfo &()>;
GetBFIFunc GetBFI;
// Returns the estimated number of iterations based on block frequency info if
@@ -249,7 +248,7 @@ class InductiveRangeCheckElimination {
public:
InductiveRangeCheckElimination(ScalarEvolution &SE,
BranchProbabilityInfo *BPI, DominatorTree &DT,
- LoopInfo &LI, GetBFIFunc GetBFI = std::nullopt)
+ LoopInfo &LI, GetBFIFunc GetBFI = nullptr)
: SE(SE), BPI(BPI), DT(DT), LI(LI), GetBFI(GetBFI) {}
bool run(Loop *L, function_ref<void(Loop *, bool)> LPMAddNewLoop);
@@ -959,7 +958,7 @@ PreservedAnalyses IRCEPass::run(Function &F, FunctionAnalysisManager &AM) {
std::optional<uint64_t>
InductiveRangeCheckElimination::estimatedTripCount(const Loop &L) {
if (GetBFI) {
- BlockFrequencyInfo &BFI = (*GetBFI)();
+ BlockFrequencyInfo &BFI = GetBFI();
uint64_t hFreq = BFI.getBlockFreq(L.getHeader()).getFrequency();
uint64_t phFreq = BFI.getBlockFreq(L.getLoopPreheader()).getFrequency();
if (phFreq == 0 || hFreq == 0)
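
llvm::function_ref is already nullable and bool-testable, which is what makes
the std::optional wrapper redundant. A minimal sketch (runIRCE is a
hypothetical caller):

  #include "llvm/ADT/STLFunctionalExtras.h"
  #include <cstdio>

  void runIRCE(llvm::function_ref<int()> GetBFI = nullptr) {
    if (GetBFI) // a plain null test replaces optional::has_value()
      std::printf("estimated trip count: %d\n", GetBFI());
  }

  void demo() {
    runIRCE();                   // no BFI available
    runIRCE([] { return 128; }); // with a BFI callback
  }
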
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
index 68094c3..c3f80f9 100644
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -2508,6 +2508,12 @@ static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
if (!GEP)
return false;
+ // Do not try to hoist a constant GEP out of the loop via reassociation.
+ // Constant GEPs can often be folded into addressing modes, and reassociating
+ // them may inhibit CSE of a common base.
+ if (GEP->hasAllConstantIndices())
+ return false;
+
auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
if (!Src || !Src->hasOneUse() || !L.contains(Src))
return false;
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 320b792..6ffe841 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -79,8 +79,7 @@
// ld.global.f32 %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
-// multiple indices to either multiple GEPs with a single index or arithmetic
-// operations (depending on whether the target uses alias analysis in codegen).
+// multiple indices to multiple GEPs with a single index.
// Such transformation can have following benefits:
// (1) It can always extract constants in the indices of structure type.
// (2) After such Lowering, there are more optimization opportunities such as
@@ -88,59 +87,33 @@
//
// E.g. The following GEPs have multiple indices:
// BB1:
-// %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
+// %p = getelementptr [10 x %struct], ptr %ptr, i64 %i, i64 %j1, i32 3
// load %p
// ...
// BB2:
-// %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 2
+// %p2 = getelementptr [10 x %struct], ptr %ptr, i64 %i, i64 %j1, i32 2
// load %p2
// ...
//
// We cannot do CSE on the common part related to index "i64 %i". Lowering
// GEPs can achieve such goals.
-// If the target does not use alias analysis in codegen, this pass will
-// lower a GEP with multiple indices into arithmetic operations:
-// BB1:
-// %1 = ptrtoint [10 x %struct]* %ptr to i64 ; CSE opportunity
-// %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity
-// %3 = add i64 %1, %2 ; CSE opportunity
-// %4 = mul i64 %j1, length_of_struct
-// %5 = add i64 %3, %4
-// %6 = add i64 %3, struct_field_3 ; Constant offset
-// %p = inttoptr i64 %6 to i32*
-// load %p
-// ...
-// BB2:
-// %7 = ptrtoint [10 x %struct]* %ptr to i64 ; CSE opportunity
-// %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity
-// %9 = add i64 %7, %8 ; CSE opportunity
-// %10 = mul i64 %j2, length_of_struct
-// %11 = add i64 %9, %10
-// %12 = add i64 %11, struct_field_2 ; Constant offset
-// %p = inttoptr i64 %12 to i32*
-// load %p2
-// ...
//
-// If the target uses alias analysis in codegen, this pass will lower a GEP
-// with multiple indices into multiple GEPs with a single index:
+// This pass will lower a GEP with multiple indices into multiple GEPs with a
+// single index:
// BB1:
-// %1 = bitcast [10 x %struct]* %ptr to i8* ; CSE opportunity
-// %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity
-// %3 = getelementptr i8* %1, i64 %2 ; CSE opportunity
+// %2 = mul i64 %i, length_of_10xstruct ; CSE opportunity
+// %3 = getelementptr i8, ptr %ptr, i64 %2 ; CSE opportunity
// %4 = mul i64 %j1, length_of_struct
-// %5 = getelementptr i8* %3, i64 %4
-// %6 = getelementptr i8* %5, struct_field_3 ; Constant offset
-// %p = bitcast i8* %6 to i32*
+// %5 = getelementptr i8, ptr %3, i64 %4
+// %p = getelementptr i8, ptr %5, struct_field_3 ; Constant offset
// load %p
// ...
// BB2:
-// %7 = bitcast [10 x %struct]* %ptr to i8* ; CSE opportunity
-// %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity
-// %9 = getelementptr i8* %7, i64 %8 ; CSE opportunity
+// %8 = mul i64 %i, length_of_10xstruct ; CSE opportunity
+// %9 = getelementptr i8, ptr %ptr, i64 %8 ; CSE opportunity
// %10 = mul i64 %j2, length_of_struct
-// %11 = getelementptr i8* %9, i64 %10
-// %12 = getelementptr i8* %11, struct_field_2 ; Constant offset
-// %p2 = bitcast i8* %12 to i32*
+// %11 = getelementptr i8, ptr %9, i64 %10
+// %p2 = getelementptr i8, ptr %11, struct_field_2 ; Constant offset
// load %p2
// ...
//
@@ -408,16 +381,6 @@ private:
void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
int64_t AccumulativeByteOffset);
- /// Lower a GEP with multiple indices into ptrtoint+arithmetics+inttoptr form.
- /// Function splitGEP already split the original GEP into a variadic part and
- /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
- /// variadic part into a set of arithmetic operations and applies
- /// AccumulativeByteOffset to it.
- /// \p Variadic The variadic part of the original GEP.
- /// \p AccumulativeByteOffset The constant offset.
- void lowerToArithmetics(GetElementPtrInst *Variadic,
- int64_t AccumulativeByteOffset);
-
/// Finds the constant offset within each index and accumulates them. If
/// LowerGEP is true, it finds in indices of both sequential and structure
/// types, otherwise it only finds in sequential indices. The output
@@ -951,55 +914,6 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
Variadic->eraseFromParent();
}
-void
-SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
- int64_t AccumulativeByteOffset) {
- IRBuilder<> Builder(Variadic);
- Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());
- assert(IntPtrTy == DL->getIndexType(Variadic->getType()) &&
- "Pointer type must match index type for arithmetic-based lowering of "
- "split GEPs");
-
- Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
- gep_type_iterator GTI = gep_type_begin(*Variadic);
- // Create ADD/SHL/MUL arithmetic operations for each sequential indices. We
- // don't create arithmetics for structure indices, as they are accumulated
- // in the constant offset index.
- for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
- if (GTI.isSequential()) {
- Value *Idx = Variadic->getOperand(I);
- // Skip zero indices.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
- if (CI->isZero())
- continue;
-
- APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
- GTI.getSequentialElementStride(*DL));
- // Scale the index by element size.
- if (ElementSize != 1) {
- if (ElementSize.isPowerOf2()) {
- Idx = Builder.CreateShl(
- Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
- } else {
- Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
- }
- }
- // Create an ADD for each index.
- ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
- }
- }
-
- // Create an ADD for the constant offset index.
- if (AccumulativeByteOffset != 0) {
- ResultPtr = Builder.CreateAdd(
- ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
- }
-
- ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
- Variadic->replaceAllUsesWith(ResultPtr);
- Variadic->eraseFromParent();
-}
-
bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
TargetTransformInfo &TTI) {
auto PtrGEP = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
@@ -1091,8 +1005,8 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
// Notice that we don't remove struct field indices here. If LowerGEP is
// disabled, a structure index is not accumulated and we still use the old
// one. If LowerGEP is enabled, a structure index is accumulated in the
- // constant offset. LowerToSingleIndexGEPs or lowerToArithmetics will later
- // handle the constant offset and won't need a new structure index.
+ // constant offset. LowerToSingleIndexGEPs will later handle the constant
+ // offset and won't need a new structure index.
gep_type_iterator GTI = gep_type_begin(*GEP);
for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
if (GTI.isSequential()) {
@@ -1167,22 +1081,9 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
GEP->setNoWrapFlags(NewGEPFlags);
- // Lowers a GEP to either GEPs with a single index or arithmetic operations.
+ // Lowers a GEP to GEPs with a single index.
if (LowerGEP) {
- // As currently BasicAA does not analyze ptrtoint/inttoptr, do not lower to
- // arithmetic operations if the target uses alias analysis in codegen.
- // Additionally, pointers that aren't integral (and so can't be safely
- // converted to integers) or those whose offset size is different from their
- // pointer size (which means that doing integer arithmetic on them could
- // affect that data) can't be lowered in this way.
- unsigned AddrSpace = GEP->getPointerAddressSpace();
- bool PointerHasExtraData = DL->getPointerSizeInBits(AddrSpace) !=
- DL->getIndexSizeInBits(AddrSpace);
- if (TTI.useAA() || DL->isNonIntegralAddressSpace(AddrSpace) ||
- PointerHasExtraData)
- lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
- else
- lowerToArithmetics(GEP, AccumulativeByteOffset);
+ lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
return true;
}
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 571fa11..1eb8996 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1249,7 +1249,8 @@ Value *SCEVExpander::tryToReuseLCSSAPhi(const SCEVAddRecExpr *S) {
// offset, if the offset is simpler.
const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
const SCEV *Op = Diff;
- match(Diff, m_scev_Mul(m_scev_AllOnes(), m_SCEV(Op)));
+ match(Op, m_scev_Add(m_SCEVConstant(), m_SCEV(Op)));
+ match(Op, m_scev_Mul(m_scev_AllOnes(), m_SCEV(Op)));
match(Op, m_scev_PtrToInt(m_SCEV(Op)));
if (!isa<SCEVConstant, SCEVUnknown>(Op))
continue;
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 94b0ab8..674de57 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -198,6 +198,11 @@ static cl::opt<unsigned> MaxSwitchCasesPerResult(
"max-switch-cases-per-result", cl::Hidden, cl::init(16),
cl::desc("Limit cases to analyze when converting a switch to select"));
+static cl::opt<unsigned> MaxJumpThreadingLiveBlocks(
+ "max-jump-threading-live-blocks", cl::Hidden, cl::init(24),
+    cl::desc("Limit number of blocks a definition in a threaded block is "
+             "allowed to be live in"));
+
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
STATISTIC(NumLinearMaps,
"Number of switch instructions turned into linear mapping");
@@ -3390,8 +3395,27 @@ bool SimplifyCFGOpt::speculativelyExecuteBB(BranchInst *BI,
return true;
}
+using BlocksSet = SmallPtrSet<BasicBlock *, 8>;
+
+// Return false if the number of blocks searched exceeds the limit.
+static bool findReaching(BasicBlock *BB, BasicBlock *DefBB,
+ BlocksSet &ReachesNonLocalUses) {
+ if (BB == DefBB)
+ return true;
+ if (!ReachesNonLocalUses.insert(BB).second)
+ return true;
+
+ if (ReachesNonLocalUses.size() > MaxJumpThreadingLiveBlocks)
+ return false;
+ for (BasicBlock *Pred : predecessors(BB))
+ if (!findReaching(Pred, DefBB, ReachesNonLocalUses))
+ return false;
+ return true;
+}
+
/// Return true if we can thread a branch across this block.
-static bool blockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
+static bool blockIsSimpleEnoughToThreadThrough(BasicBlock *BB,
+ BlocksSet &NonLocalUseBlocks) {
int Size = 0;
EphemeralValueTracker EphTracker;
@@ -3411,12 +3435,16 @@ static bool blockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
return false; // Don't clone large BB's.
}
- // We can only support instructions that do not define values that are
- // live outside of the current basic block.
+ // Record blocks with non-local uses of values defined in the current basic
+ // block.
for (User *U : I.users()) {
Instruction *UI = cast<Instruction>(U);
- if (UI->getParent() != BB || isa<PHINode>(UI))
- return false;
+ BasicBlock *UsedInBB = UI->getParent();
+ if (UsedInBB == BB) {
+ if (isa<PHINode>(UI))
+ return false;
+ } else
+ NonLocalUseBlocks.insert(UsedInBB);
}
// Looks ok, continue checking.
@@ -3475,18 +3503,37 @@ foldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
return false;
// Now we know that this block has multiple preds and two succs.
- // Check that the block is small enough and values defined in the block are
- // not used outside of it.
- if (!blockIsSimpleEnoughToThreadThrough(BB))
+ // Check that the block is small enough and record which non-local blocks use
+ // values defined in the block.
+
+ BlocksSet NonLocalUseBlocks;
+ BlocksSet ReachesNonLocalUseBlocks;
+ if (!blockIsSimpleEnoughToThreadThrough(BB, NonLocalUseBlocks))
return false;
+ // Jump-threading can only be done to destinations where no values defined
+ // in BB are live.
+
+ // Quickly check if both destinations have uses. If so, jump-threading cannot
+ // be done.
+ if (NonLocalUseBlocks.contains(BI->getSuccessor(0)) &&
+ NonLocalUseBlocks.contains(BI->getSuccessor(1)))
+ return false;
+
+ // Search backward from NonLocalUseBlocks to find which blocks
+ // reach non-local uses.
+ for (BasicBlock *UseBB : NonLocalUseBlocks)
+ // Give up if too many blocks are searched.
+ if (!findReaching(UseBB, BB, ReachesNonLocalUseBlocks))
+ return false;
+
for (const auto &Pair : KnownValues) {
- // Okay, we now know that all edges from PredBB should be revectored to
- // branch to RealDest.
ConstantInt *CB = Pair.first;
ArrayRef<BasicBlock *> PredBBs = Pair.second.getArrayRef();
BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue());
+ // Okay, we now know that all edges from PredBB should be revectored to
+ // branch to RealDest.
if (RealDest == BB)
continue; // Skip self loops.
@@ -3496,6 +3543,10 @@ foldCondBranchOnValueKnownInPredecessorImpl(BranchInst *BI, DomTreeUpdater *DTU,
}))
continue;
+ // Only revector to RealDest if no values defined in BB are live.
+ if (ReachesNonLocalUseBlocks.contains(RealDest))
+ continue;
+
LLVM_DEBUG({
dbgs() << "Condition " << *Cond << " in " << BB->getName()
<< " has value " << *Pair.first << " in predecessors:\n";
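
Restating the legality rule the new code enforces: a value defined in BB is
live in every block from which one of its non-local uses can be reached
backwards, and an edge may only be threaded to a destination outside that
live set. A toy model of the backward search using std containers (the
budget mirrors max-jump-threading-live-blocks):

  #include <set>
  #include <vector>

  using PredMap = std::vector<std::vector<int>>; // PredMap[B] = preds of B

  // Mark every block the walk visits as live; give up once the live set
  // exceeds the budget.
  bool findReaching(const PredMap &Preds, int BB, int DefBB,
                    std::set<int> &Live, unsigned Budget = 24) {
    if (BB == DefBB)
      return true;
    if (!Live.insert(BB).second)
      return true; // already visited
    if (Live.size() > Budget)
      return false; // searched too many blocks
    for (int P : Preds[BB])
      if (!findReaching(Preds, P, DefBB, Live, Budget))
        return false;
    return true;
  }
  // Threading from BB is then allowed only to destinations not in Live.
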
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 969d225..c47fd942 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -1665,13 +1665,12 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
// Keep a record of all the exiting blocks.
SmallVector<const SCEVPredicate *, 4> Predicates;
- std::optional<std::pair<BasicBlock *, BasicBlock *>> SingleUncountableEdge;
+ BasicBlock *SingleUncountableExitingBlock = nullptr;
for (BasicBlock *BB : ExitingBlocks) {
const SCEV *EC =
PSE.getSE()->getPredicatedExitCount(TheLoop, BB, &Predicates);
if (isa<SCEVCouldNotCompute>(EC)) {
- SmallVector<BasicBlock *, 2> Succs(successors(BB));
- if (Succs.size() != 2) {
+ if (size(successors(BB)) != 2) {
reportVectorizationFailure(
"Early exiting block does not have exactly two successors",
"Incorrect number of successors from early exiting block",
@@ -1679,15 +1678,7 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
return false;
}
- BasicBlock *ExitBlock;
- if (!TheLoop->contains(Succs[0]))
- ExitBlock = Succs[0];
- else {
- assert(!TheLoop->contains(Succs[1]));
- ExitBlock = Succs[1];
- }
-
- if (SingleUncountableEdge) {
+ if (SingleUncountableExitingBlock) {
reportVectorizationFailure(
"Loop has too many uncountable exits",
"Cannot vectorize early exit loop with more than one early exit",
@@ -1695,7 +1686,7 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
return false;
}
- SingleUncountableEdge = {BB, ExitBlock};
+ SingleUncountableExitingBlock = BB;
} else
CountableExitingBlocks.push_back(BB);
}
@@ -1705,7 +1696,7 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
// PSE.getSymbolicMaxBackedgeTakenCount() below.
Predicates.clear();
- if (!SingleUncountableEdge) {
+ if (!SingleUncountableExitingBlock) {
    LLVM_DEBUG(dbgs() << "LV: Could not find any uncountable exits");
return false;
}
@@ -1713,7 +1704,7 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
// The only supported early exit loops so far are ones where the early
// exiting block is a unique predecessor of the latch block.
BasicBlock *LatchPredBB = LatchBB->getUniquePredecessor();
- if (LatchPredBB != SingleUncountableEdge->first) {
+ if (LatchPredBB != SingleUncountableExitingBlock) {
reportVectorizationFailure("Early exit is not the latch predecessor",
"Cannot vectorize early exit loop",
"EarlyExitNotLatchPredecessor", ORE, TheLoop);
@@ -1766,7 +1757,7 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
}
// The vectoriser cannot handle loads that occur after the early exit block.
- assert(LatchBB->getUniquePredecessor() == SingleUncountableEdge->first &&
+ assert(LatchBB->getUniquePredecessor() == SingleUncountableExitingBlock &&
"Expected latch predecessor to be the early exiting block");
// TODO: Handle loops that may fault.
@@ -1789,7 +1780,7 @@ bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
LLVM_DEBUG(dbgs() << "LV: Found an early exit loop with symbolic max "
"backedge taken count: "
<< *SymbolicMaxBTC << '\n');
- UncountableEdge = SingleUncountableEdge;
+ UncountableExitingBB = SingleUncountableExitingBlock;
return true;
}
@@ -1861,7 +1852,8 @@ bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
return false;
} else {
if (!isVectorizableEarlyExitLoop()) {
- UncountableEdge = std::nullopt;
+ assert(!hasUncountableEarlyExit() &&
+ "Must be false without vectorizable early-exit loop");
if (DoExtraAnalysis)
Result = false;
else
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
index f57ce0c..ea0fa06 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h
@@ -170,8 +170,7 @@ public:
new VPInstruction(Opcode, Operands, Flags, DL, Name));
}
- VPInstruction *createNaryOp(unsigned Opcode,
- std::initializer_list<VPValue *> Operands,
+ VPInstruction *createNaryOp(unsigned Opcode, ArrayRef<VPValue *> Operands,
Type *ResultTy, const VPIRFlags &Flags = {},
DebugLoc DL = DebugLoc::getUnknown(),
const Twine &Name = "") {
@@ -180,7 +179,7 @@ public:
}
VPInstruction *createOverflowingOp(unsigned Opcode,
- std::initializer_list<VPValue *> Operands,
+ ArrayRef<VPValue *> Operands,
VPRecipeWithIRFlags::WrapFlagsTy WrapFlags,
DebugLoc DL = DebugLoc::getUnknown(),
const Twine &Name = "") {
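
The ArrayRef signature stays source-compatible with existing brace-list
callers while also accepting runtime-sized operand vectors:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"

  static int count(llvm::ArrayRef<int> Operands) {
    return static_cast<int>(Operands.size());
  }

  void demo() {
    count({1, 2, 3});            // initializer_list still binds
    llvm::SmallVector<int> Dynamic{4, 5, 6};
    count(Dynamic);              // and so does a runtime-sized vector
  }
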
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index fe93fcd..850c4a1 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3150,7 +3150,7 @@ bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
!isScalarEpilogueAllowed();
bool StoreAccessWithGapsRequiresMasking =
- isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
+ isa<StoreInst>(I) && !Group->isFull();
if (!PredicatedAccessRequiresMasking &&
!LoadAccessWithGapsRequiresEpilogMasking &&
!StoreAccessWithGapsRequiresMasking)
@@ -5372,7 +5372,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
// Calculate the cost of the whole interleaved group.
bool UseMaskForGaps =
(Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
- (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
+ (isa<StoreInst>(I) && !Group->isFull());
InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
InsertPos->getOpcode(), WideVecTy, Group->getFactor(), Indices,
Group->getAlign(), AS, CostKind, Legal->isMaskRequired(I),
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 68e7c20..11b4677 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2530,8 +2530,8 @@ void VPReductionRecipe::execute(VPTransformState &State) {
NextInChain = createMinMaxOp(State.Builder, Kind, NewRed, PrevInChain);
else
NextInChain = State.Builder.CreateBinOp(
- (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind), NewRed,
- PrevInChain);
+ (Instruction::BinaryOps)RecurrenceDescriptor::getOpcode(Kind),
+ PrevInChain, NewRed);
}
State.set(this, NextInChain, /*IsScalar*/ true);
}
@@ -3548,6 +3548,8 @@ void VPInterleaveRecipe::execute(VPTransformState &State) {
// Vectorize the interleaved store group.
Value *MaskForGaps =
createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
+ assert(((MaskForGaps != nullptr) == NeedsMaskForGaps) &&
+ "Mismatch between NeedsMaskForGaps and MaskForGaps");
assert((!MaskForGaps || !State.VF.isScalable()) &&
"masking gaps for scalable vectors is not yet supported.");
ArrayRef<VPValue *> StoredValues = getStoredValues();
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 47a9ff0..fcbc86f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -32,11 +32,11 @@
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
-#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/TypeSize.h"
using namespace llvm;
+using namespace VPlanPatternMatch;
bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes(
VPlanPtr &Plan,
@@ -528,13 +528,11 @@ static void removeRedundantCanonicalIVs(VPlan &Plan) {
/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
- using namespace llvm::PatternMatch;
// Do remove conditional assume instructions as their conditions may be
// flattened.
auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
- bool IsConditionalAssume =
- RepR && RepR->isPredicated() &&
- match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
+ bool IsConditionalAssume = RepR && RepR->isPredicated() &&
+ match(RepR, m_Intrinsic<Intrinsic::assume>());
if (IsConditionalAssume)
return true;
@@ -625,7 +623,6 @@ static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
/// original IV's users. This is an optional optimization to reduce the needs of
/// vector extracts.
static void legalizeAndOptimizeInductions(VPlan &Plan) {
- using namespace llvm::VPlanPatternMatch;
VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
bool HasOnlyVectorVFs = !Plan.hasScalarVFOnly();
VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
@@ -727,7 +724,6 @@ static VPWidenInductionRecipe *getOptimizableIVOf(VPValue *VPV) {
return nullptr;
auto IsWideIVInc = [&]() {
- using namespace VPlanPatternMatch;
auto &ID = WideIV->getInductionDescriptor();
// Check if VPV increments the induction by the induction step.
@@ -771,8 +767,6 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan,
VPTypeAnalysis &TypeInfo,
VPBlockBase *PredVPBB,
VPValue *Op) {
- using namespace VPlanPatternMatch;
-
VPValue *Incoming, *Mask;
if (!match(Op, m_VPInstruction<VPInstruction::ExtractLane>(
m_VPInstruction<VPInstruction::FirstActiveLane>(
@@ -827,8 +821,6 @@ static VPValue *
optimizeLatchExitInductionUser(VPlan &Plan, VPTypeAnalysis &TypeInfo,
VPBlockBase *PredVPBB, VPValue *Op,
DenseMap<VPValue *, VPValue *> &EndValues) {
- using namespace VPlanPatternMatch;
-
VPValue *Incoming;
if (!match(Op, m_VPInstruction<VPInstruction::ExtractLastElement>(
m_VPValue(Incoming))))
@@ -986,7 +978,6 @@ static Value *tryToFoldLiveIns(const VPRecipeBase &R, unsigned Opcode,
/// Try to simplify recipe \p R.
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
- using namespace llvm::VPlanPatternMatch;
VPlan *Plan = R.getParent()->getPlan();
auto *Def = dyn_cast<VPSingleDefRecipe>(&R);
@@ -1269,7 +1260,6 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
/// Normalize and simplify VPBlendRecipes. Should be run after simplifyRecipes
/// to make sure the masks are simplified.
static void simplifyBlends(VPlan &Plan) {
- using namespace llvm::VPlanPatternMatch;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
@@ -1393,7 +1383,6 @@ static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan,
// Currently only handle cases where the single user is a header-mask
// comparison with the backedge-taken-count.
- using namespace VPlanPatternMatch;
if (!match(
*WideIV->user_begin(),
m_Binary<Instruction::ICmp>(
@@ -1424,8 +1413,7 @@ static bool optimizeVectorInductionWidthForTCAndVFUF(VPlan &Plan,
static bool isConditionTrueViaVFAndUF(VPValue *Cond, VPlan &Plan,
ElementCount BestVF, unsigned BestUF,
ScalarEvolution &SE) {
- using namespace llvm::VPlanPatternMatch;
- if (match(Cond, m_Binary<Instruction::Or>(m_VPValue(), m_VPValue())))
+ if (match(Cond, m_BinaryOr(m_VPValue(), m_VPValue())))
return any_of(Cond->getDefiningRecipe()->operands(), [&Plan, BestVF, BestUF,
&SE](VPValue *C) {
return isConditionTrueViaVFAndUF(C, Plan, BestVF, BestUF, SE);
@@ -1464,7 +1452,6 @@ static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
auto *Term = &ExitingVPBB->back();
VPValue *Cond;
ScalarEvolution &SE = *PSE.getSE();
- using namespace llvm::VPlanPatternMatch;
if (match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) ||
match(Term, m_BranchOnCond(
m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue()))))) {
@@ -1496,11 +1483,11 @@ static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
auto *CanIVTy = Plan.getCanonicalIV()->getScalarType();
if (all_of(Header->phis(),
IsaPred<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe,
- VPFirstOrderRecurrencePHIRecipe>)) {
+ VPFirstOrderRecurrencePHIRecipe, VPPhi>)) {
for (VPRecipeBase &HeaderR : make_early_inc_range(Header->phis())) {
- auto *HeaderPhiR = cast<VPHeaderPHIRecipe>(&HeaderR);
- HeaderPhiR->replaceAllUsesWith(HeaderPhiR->getStartValue());
- HeaderPhiR->eraseFromParent();
+ auto *Phi = cast<VPPhiAccessors>(&HeaderR);
+ HeaderR.getVPSingleValue()->replaceAllUsesWith(Phi->getIncomingValue(0));
+ HeaderR.eraseFromParent();
}
VPBlockBase *Preheader = VectorRegion->getSinglePredecessor();
@@ -1847,7 +1834,6 @@ void VPlanTransforms::truncateToMinimalBitwidths(
if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
VPW->dropPoisonGeneratingFlags();
- using namespace llvm::VPlanPatternMatch;
if (OldResSizeInBits != NewResSizeInBits &&
!match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
// Extend result to original width.
@@ -1897,7 +1883,6 @@ void VPlanTransforms::truncateToMinimalBitwidths(
}
void VPlanTransforms::removeBranchOnConst(VPlan &Plan) {
- using namespace llvm::VPlanPatternMatch;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_shallow(Plan.getEntry()))) {
VPValue *Cond;
@@ -2143,7 +2128,6 @@ static VPRecipeBase *optimizeMaskToEVL(VPValue *HeaderMask,
VPRecipeBase &CurRecipe,
VPTypeAnalysis &TypeInfo,
VPValue &AllOneMask, VPValue &EVL) {
- using namespace llvm::VPlanPatternMatch;
auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
assert(OrigMask && "Unmasked recipe when folding tail");
// HeaderMask will be handled using EVL.
@@ -2223,7 +2207,6 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
vp_depth_first_deep(Plan.getVectorLoopRegion()->getEntry()))) {
for (VPRecipeBase &R : *VPBB) {
- using namespace VPlanPatternMatch;
VPValue *V1, *V2;
if (!match(&R,
m_VPInstruction<VPInstruction::FirstOrderRecurrenceSplice>(
@@ -2309,10 +2292,12 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
/// [ %NextEVLIV, %vector.body ]
-/// %AVL = sub original TC, %EVLPhi
+/// %AVL = phi [ trip-count, %vector.ph ], [ %NextAVL, %vector.body ]
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %AVL
/// ...
-/// %NextEVLIV = add IVSize (cast i32 %VPEVVL to IVSize), %EVLPhi
+/// %OpEVL = cast i32 %VPEVL to IVSize
+/// %NextEVLIV = add IVSize %OpEVL, %EVLPhi
+/// %NextAVL = sub IVSize nuw %AVL, %OpEVL
/// ...
///
/// If MaxSafeElements is provided, the function adds the following recipes:
@@ -2323,12 +2308,14 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
/// ...
/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
/// [ %NextEVLIV, %vector.body ]
-/// %AVL = sub original TC, %EVLPhi
+/// %AVL = phi [ trip-count, %vector.ph ], [ %NextAVL, %vector.body ]
/// %cmp = cmp ult %AVL, MaxSafeElements
/// %SAFE_AVL = select %cmp, %AVL, MaxSafeElements
/// %VPEVL = EXPLICIT-VECTOR-LENGTH %SAFE_AVL
/// ...
-/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
+/// %OpEVL = cast i32 %VPEVL to IVSize
+/// %NextEVLIV = add IVSize %OpEVL, %EVLPhi
+/// %NextAVL = sub IVSize nuw %AVL, %OpEVL
/// ...
///
bool VPlanTransforms::tryAddExplicitVectorLength(
@@ -2350,9 +2337,12 @@ bool VPlanTransforms::tryAddExplicitVectorLength(
auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
EVLPhi->insertAfter(CanonicalIVPHI);
VPBuilder Builder(Header, Header->getFirstNonPhi());
- // Compute original TC - IV as the AVL (application vector length).
- VPValue *AVL = Builder.createNaryOp(
- Instruction::Sub, {Plan.getTripCount(), EVLPhi}, DebugLoc(), "avl");
+ // Create the AVL (application vector length), starting from TC -> 0 in steps
+ // of EVL.
+ VPPhi *AVLPhi = Builder.createScalarPhi(
+ {Plan.getTripCount()}, DebugLoc::getCompilerGenerated(), "avl");
+ VPValue *AVL = AVLPhi;
+
if (MaxSafeElements) {
// Support for MaxSafeDist for correct loop emission.
VPValue *AVLSafe =
@@ -2379,6 +2369,11 @@ bool VPlanTransforms::tryAddExplicitVectorLength(
CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
EVLPhi->addOperand(NextEVLIV);
+ VPValue *NextAVL = Builder.createOverflowingOp(
+ Instruction::Sub, {AVLPhi, OpVPEVL}, {/*hasNUW=*/true, /*hasNSW=*/false},
+ DebugLoc::getCompilerGenerated(), "avl.next");
+ AVLPhi->addOperand(NextAVL);
+
transformRecipestoEVLRecipes(Plan, *VPEVL);
// Replace all uses of VPCanonicalIVPHIRecipe by
@@ -2391,7 +2386,6 @@ bool VPlanTransforms::tryAddExplicitVectorLength(
}
void VPlanTransforms::canonicalizeEVLLoops(VPlan &Plan) {
- using namespace llvm::VPlanPatternMatch;
// Find EVL loop entries by locating VPEVLBasedIVPHIRecipe.
// There should be only one EVL PHI in the entire plan.
VPEVLBasedIVPHIRecipe *EVLPhi = nullptr;
@@ -2480,7 +2474,6 @@ void VPlanTransforms::dropPoisonGeneratingRecipes(
// drop them directly.
if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
VPValue *A, *B;
- using namespace llvm::VPlanPatternMatch;
// Dropping disjoint from an OR may yield incorrect results, as some
// analysis may have converted it to an Add implicitly (e.g. SCEV used
// for dependence analysis). Instead, replace it with an equivalent Add.
@@ -2570,7 +2563,8 @@ void VPlanTransforms::createInterleaveGroups(
}
bool NeedsMaskForGaps =
- IG->requiresScalarEpilogue() && !ScalarEpilogueAllowed;
+ (IG->requiresScalarEpilogue() && !ScalarEpilogueAllowed) ||
+ (!StoredValues.empty() && !IG->isFull());
Instruction *IRInsertPos = IG->getInsertPos();
auto *InsertPos =
@@ -2774,8 +2768,6 @@ void VPlanTransforms::dissolveLoopRegions(VPlan &Plan) {
void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan,
Type &CanonicalIVTy) {
- using namespace llvm::VPlanPatternMatch;
-
VPTypeAnalysis TypeInfo(&CanonicalIVTy);
SmallVector<VPRecipeBase *> ToRemove;
for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
@@ -2852,8 +2844,6 @@ void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan,
void VPlanTransforms::handleUncountableEarlyExit(
VPBasicBlock *EarlyExitingVPBB, VPBasicBlock *EarlyExitVPBB, VPlan &Plan,
VPBasicBlock *HeaderVPBB, VPBasicBlock *LatchVPBB, VFRange &Range) {
- using namespace llvm::VPlanPatternMatch;
-
VPBlockBase *MiddleVPBB = LatchVPBB->getSuccessors()[0];
if (!EarlyExitVPBB->getSinglePredecessor() &&
EarlyExitVPBB->getPredecessors()[1] == MiddleVPBB) {
@@ -2947,8 +2937,6 @@ void VPlanTransforms::handleUncountableEarlyExit(
static VPExpressionRecipe *
tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx,
VFRange &Range) {
- using namespace VPlanPatternMatch;
-
Type *RedTy = Ctx.Types.inferScalarType(Red);
VPValue *VecOp = Red->getVecOp();
@@ -2994,8 +2982,6 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx,
static VPExpressionRecipe *
tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
VPCostContext &Ctx, VFRange &Range) {
- using namespace VPlanPatternMatch;
-
unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
if (Opcode != Instruction::Add)
return nullptr;
@@ -3209,9 +3195,7 @@ static bool canNarrowLoad(VPWidenRecipe *WideMember0, unsigned OpIdx,
return !W->getMask() && WideMember0->getOperand(OpIdx) == OpV;
if (auto *IR = dyn_cast<VPInterleaveRecipe>(DefR))
- return IR->getInterleaveGroup()->getFactor() ==
- IR->getInterleaveGroup()->getNumMembers() &&
- IR->getVPValue(Idx) == OpV;
+ return IR->getInterleaveGroup()->isFull() && IR->getVPValue(Idx) == OpV;
return false;
}
@@ -3258,7 +3242,6 @@ static bool isAlreadyNarrow(VPValue *VPV) {
void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
unsigned VectorRegWidth) {
- using namespace llvm::VPlanPatternMatch;
VPRegionBlock *VectorLoop = Plan.getVectorLoopRegion();
if (VF.isScalable() || !VectorLoop)
return;
@@ -3328,9 +3311,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
if (!DefR)
return false;
auto *IR = dyn_cast<VPInterleaveRecipe>(DefR);
- return IR &&
- IR->getInterleaveGroup()->getFactor() ==
- IR->getInterleaveGroup()->getNumMembers() &&
+ return IR && IR->getInterleaveGroup()->isFull() &&
IR->getVPValue(Op.index()) == Op.value();
})) {
StoreGroups.push_back(InterleaveR);
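
A scalar model of the EVL loop shape after the AVL-phi change earlier in this
file's hunks (hypothetical names; the real EVL is computed by the target from
SAFE_AVL, so std::min is only a stand-in, and VF/MaxSafeElements are assumed
nonzero):

  #include <algorithm>
  #include <cstdint>

  void evlLoop(uint64_t TripCount, uint64_t VF, uint64_t MaxSafeElements) {
    // %avl is now a phi: TripCount from the preheader, %avl.next on the
    // backedge, instead of being recomputed as TripCount - %EVLPhi.
    for (uint64_t AVL = TripCount; AVL != 0;) {
      uint64_t SafeAVL = std::min(AVL, MaxSafeElements); // %cmp / %SAFE_AVL
      uint64_t EVL = std::min(SafeAVL, VF); // EXPLICIT-VECTOR-LENGTH %SAFE_AVL
      // ... EVL-predicated vector work on EVL elements ...
      AVL -= EVL; // %avl.next = sub nuw %avl, %OpEVL
    }
  }
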
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 57d01cb..14ae4f2 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -79,9 +79,8 @@ bool VPlanVerifier::verifyPhiRecipes(const VPBasicBlock *VPBB) {
if (isa<VPActiveLaneMaskPHIRecipe>(RecipeI))
NumActiveLaneMaskPhiRecipes++;
- if (IsHeaderVPBB && !isa<VPHeaderPHIRecipe, VPWidenPHIRecipe>(*RecipeI) &&
- !isa<VPInstruction>(*RecipeI) &&
- cast<VPInstruction>(RecipeI)->getOpcode() == Instruction::PHI) {
+ if (IsHeaderVPBB &&
+ !isa<VPHeaderPHIRecipe, VPWidenPHIRecipe, VPPhi>(*RecipeI)) {
errs() << "Found non-header PHI recipe in header VPBB";
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
errs() << ": ";
@@ -173,7 +172,8 @@ bool VPlanVerifier::verifyEVLRecipe(const VPInstruction &EVL) const {
[&](const VPInstructionWithType *S) { return VerifyEVLUse(*S, 0); })
.Case<VPInstruction>([&](const VPInstruction *I) {
if (I->getOpcode() == Instruction::PHI ||
- I->getOpcode() == Instruction::ICmp)
+ I->getOpcode() == Instruction::ICmp ||
+ I->getOpcode() == Instruction::Sub)
return VerifyEVLUse(*I, 1);
switch (I->getOpcode()) {
case Instruction::Add:
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 6252f4f..6345b18 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -1664,6 +1664,8 @@ static Align computeAlignmentAfterScalarization(Align VectorAlignment,
// %1 = getelementptr inbounds i32, i32* %0, i64 0, i64 1
// store i32 %b, i32* %1
bool VectorCombine::foldSingleElementStore(Instruction &I) {
+ if (!TTI.allowVectorElementIndexingUsingGEP())
+ return false;
auto *SI = cast<StoreInst>(&I);
if (!SI->isSimple() || !isa<VectorType>(SI->getValueOperand()->getType()))
return false;
@@ -1719,6 +1721,9 @@ bool VectorCombine::foldSingleElementStore(Instruction &I) {
/// Try to scalarize vector loads feeding extractelement instructions.
bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
+ if (!TTI.allowVectorElementIndexingUsingGEP())
+ return false;
+
Value *Ptr;
if (!match(&I, m_Load(m_Value(Ptr))))
return false;
@@ -1827,6 +1832,8 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
}
bool VectorCombine::scalarizeExtExtract(Instruction &I) {
+ if (!TTI.allowVectorElementIndexingUsingGEP())
+ return false;
auto *Ext = dyn_cast<ZExtInst>(&I);
if (!Ext)
return false;
diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index 189e57e..23572898 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -446,13 +446,13 @@ define void @lrint() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.lrint.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i32 @llvm.lrint.i32.f64(double poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.lrint.v2i32.v2f64(<2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.lrint.v4i32.v4f64(<4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.lrint.v8i32.v8f64(<8 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.lrint.v16i32.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i32> @llvm.lrint.v4i32.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i32> @llvm.lrint.v8i32.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i32> @llvm.lrint.v16i32.v16f64(<16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i32> @llvm.lrint.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i32> @llvm.lrint.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i32> @llvm.lrint.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %30 = call i32 @llvm.lrint.i32.bf16(bfloat poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call <2 x i64> @llvm.lrint.v2i64.v2bf16(<2 x bfloat> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %32 = call <4 x i64> @llvm.lrint.v4i64.v4bf16(<4 x bfloat> poison)
@@ -708,13 +708,13 @@ define void @lround() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f32(<vscale x 16 x float> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call i32 @llvm.lround.i32.f64(double poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.lround.v2i32.v2f64(<2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <4 x i32> @llvm.lround.v4i32.v4f64(<4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <8 x i32> @llvm.lround.v8i32.v8f64(<8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <16 x i32> @llvm.lround.v16i32.v16f64(<16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.lround.nxv1i32.nxv1f64(<vscale x 1 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x i32> @llvm.lround.nxv2i32.nxv2f64(<vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x i32> @llvm.lround.nxv4i32.nxv4f64(<vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x i32> @llvm.lround.nxv8i32.nxv8f64(<vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <vscale x 16 x i32> @llvm.lround.nxv16i32.nxv16f64(<vscale x 16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %31 = call i64 @llvm.lround.i64.bf16(bfloat poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.lround.v2i64.v2bf16(<2 x bfloat> poison)
diff --git a/llvm/test/Analysis/ScalarEvolution/zext-add.ll b/llvm/test/Analysis/ScalarEvolution/zext-add.ll
index 3290ee2..a08feef 100644
--- a/llvm/test/Analysis/ScalarEvolution/zext-add.ll
+++ b/llvm/test/Analysis/ScalarEvolution/zext-add.ll
@@ -17,7 +17,7 @@ define void @test_push_constant_into_zext(ptr %dst, ptr %src, i32 %n, i64 %offse
; CHECK-NEXT: %l = load i8, ptr %outer.ptr, align 1
; CHECK-NEXT: --> %l U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %inner.loop: Variant, %outer.loop: Variant }
; CHECK-NEXT: %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 %offset
-; CHECK-NEXT: --> {(%offset + %src),+,%offset}<%inner.loop> U: full-set S: full-set Exits: (((1 + (zext i32 (-1 + (1 smax %n))<nsw> to i64))<nuw><nsw> * %offset) + %src) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
+; CHECK-NEXT: --> {(%offset + %src),+,%offset}<%inner.loop> U: full-set S: full-set Exits: (((zext i32 (1 smax %n) to i64) * %offset) + %src) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
; CHECK-NEXT: %iv.next = add i32 %iv, 1
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%inner.loop> U: [1,-2147483648) S: [1,-2147483648) Exits: (1 smax %n) LoopDispositions: { %inner.loop: Computable, %outer.loop: Variant }
; CHECK-NEXT: Determining loop execution counts for: @test_push_constant_into_zext
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
index 09e5a15..a422f60 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
@@ -667,11 +667,10 @@ body: |
; SELECT-NEXT: {{ $}}
; SELECT-NEXT: %zero:gpr64 = COPY $xzr
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
- ; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
+ ; SELECT-NEXT: %cmp_lhs:gpr64 = SUBSXrs %zero, %reg0, 63, implicit-def dead $nzcv
; SELECT-NEXT: %reg1:gpr64 = COPY $x1
; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
- ; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
- ; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
+ ; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_lhs, %sext_in_reg, 131, implicit-def $nzcv
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
; SELECT-NEXT: $w0 = COPY %cmp
; SELECT-NEXT: RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
index 578038b..d9cdac4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll
@@ -1,8 +1,8 @@
; RUN: llc -O3 -aarch64-enable-gep-opt=true -verify-machineinstrs %s -o - | FileCheck %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-NoAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
-; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-UseAA %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -aarch64-use-aa=false -print-after=codegenprepare < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cyclone < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
+; RUN: llc -O3 -aarch64-enable-gep-opt=true -print-after=codegenprepare -mcpu=cortex-a53 < %s 2>&1 | FileCheck --check-prefix=CHECK-IR %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
target triple = "aarch64"
@@ -38,24 +38,12 @@ if.end: ; preds = %if.then, %entry
; CHECK-NOT: madd
; CHECK:ldr
-; CHECK-NoAA-LABEL: @test_GEP_CSE(
-; CHECK-NoAA: [[PTR0:%[a-zA-Z0-9]+]] = ptrtoint ptr %string to i64
-; CHECK-NoAA: [[PTR1:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-NoAA: [[PTR2:%[a-zA-Z0-9]+]] = add i64 [[PTR0]], [[PTR1]]
-; CHECK-NoAA: add i64 [[PTR2]], 23052
-; CHECK-NoAA: inttoptr
-; CHECK-NoAA: if.then:
-; CHECK-NoAA-NOT: ptrtoint
-; CHECK-NoAA-NOT: mul
-; CHECK-NoAA: add i64 [[PTR2]], 23048
-; CHECK-NoAA: inttoptr
-
-; CHECK-UseAA-LABEL: @test_GEP_CSE(
-; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23052
-; CHECK-UseAA: if.then:
-; CHECK-UseAA: getelementptr i8, ptr [[PTR1]], i64 23048
+; CHECK-IR-LABEL: @test_GEP_CSE(
+; CHECK-IR: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
+; CHECK-IR: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, ptr %string, i64 [[IDX]]
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23052
+; CHECK-IR: if.then:
+; CHECK-IR: getelementptr i8, ptr [[PTR1]], i64 23048
%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
%struct.pt = type { ptr, i32, i32 }
diff --git a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
index f7e16b8..9947fba 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-wide-mul.ll
@@ -38,14 +38,12 @@ define <16 x i32> @mul_i32(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v4.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v5.8h, v1.16b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v3.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v3.8h
-; CHECK-GI-NEXT: umull v2.4s, v4.4h, v5.4h
-; CHECK-GI-NEXT: umull2 v3.4s, v4.8h, v5.8h
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v3.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v0.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -75,26 +73,20 @@ define <16 x i64> @mul_i64(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-LABEL: mul_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v4.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v2.8h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v3.4h, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v3.4s, v3.8h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v16.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v17.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v4.2s, v2.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v4.4s, v2.4s
-; CHECK-GI-NEXT: umull v2.2d, v5.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v5.4s, v3.4s
-; CHECK-GI-NEXT: umull v4.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umull2 v5.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umull v6.2d, v16.2s, v17.2s
-; CHECK-GI-NEXT: umull2 v7.2d, v16.4s, v17.4s
+; CHECK-GI-NEXT: umull v2.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v2.8h, #0
+; CHECK-GI-NEXT: ushll v5.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v7.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
+; CHECK-GI-NEXT: ushll v4.2d, v5.2s, #0
+; CHECK-GI-NEXT: ushll2 v5.2d, v5.4s, #0
+; CHECK-GI-NEXT: ushll v6.2d, v7.2s, #0
+; CHECK-GI-NEXT: ushll2 v7.2d, v7.4s, #0
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
@@ -142,18 +134,12 @@ define <16 x i32> @mla_i32(<16 x i8> %a, <16 x i8> %b, <16 x i32> %c) {
;
; CHECK-GI-LABEL: mla_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v6.4h, v7.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v6.8h, v7.8h
-; CHECK-GI-NEXT: umlal v4.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v5.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v6.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v7.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v6.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v6.8h
+; CHECK-GI-NEXT: uaddw v2.4s, v4.4s, v7.4h
+; CHECK-GI-NEXT: uaddw2 v3.4s, v5.4s, v7.8h
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i32>
@@ -186,35 +172,21 @@ define <16 x i64> @mla_i64(<16 x i8> %a, <16 x i8> %b, <16 x i64> %c) {
;
; CHECK-GI-LABEL: mla_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: mov v16.16b, v2.16b
-; CHECK-GI-NEXT: mov v17.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
-; CHECK-GI-NEXT: mov v4.16b, v6.16b
-; CHECK-GI-NEXT: mov v5.16b, v7.16b
-; CHECK-GI-NEXT: ushll v6.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v7.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-NEXT: ushll v18.4s, v6.4h, #0
-; CHECK-GI-NEXT: ushll v20.4s, v7.4h, #0
-; CHECK-GI-NEXT: ushll2 v19.4s, v6.8h, #0
-; CHECK-GI-NEXT: ushll v21.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll2 v22.4s, v7.8h, #0
-; CHECK-GI-NEXT: ushll v23.4s, v1.4h, #0
-; CHECK-GI-NEXT: ldp q6, q7, [sp]
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v16.2d, v18.2s, v20.2s
-; CHECK-GI-NEXT: umlal2 v17.2d, v18.4s, v20.4s
-; CHECK-GI-NEXT: umlal v2.2d, v19.2s, v22.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v19.4s, v22.4s
-; CHECK-GI-NEXT: umlal v4.2d, v21.2s, v23.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v21.4s, v23.4s
-; CHECK-GI-NEXT: umlal v6.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v7.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v16.16b
-; CHECK-GI-NEXT: mov v1.16b, v17.16b
+; CHECK-GI-NEXT: umull v16.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: umull2 v0.8h, v0.16b, v1.16b
+; CHECK-GI-NEXT: ldp q19, q20, [sp]
+; CHECK-GI-NEXT: ushll v1.4s, v16.4h, #0
+; CHECK-GI-NEXT: ushll2 v16.4s, v16.8h, #0
+; CHECK-GI-NEXT: ushll v17.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v18.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v16.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v16.4s
+; CHECK-GI-NEXT: uaddw v4.2d, v6.2d, v17.2s
+; CHECK-GI-NEXT: uaddw2 v5.2d, v7.2d, v17.4s
+; CHECK-GI-NEXT: uaddw v6.2d, v19.2d, v18.2s
+; CHECK-GI-NEXT: uaddw2 v7.2d, v20.2d, v18.4s
; CHECK-GI-NEXT: ret
entry:
%ea = zext <16 x i8> %a to <16 x i64>
diff --git a/llvm/test/CodeGen/AArch64/adc.ll b/llvm/test/CodeGen/AArch64/adc.ll
index 4b1393f..12e8bf2 100644
--- a/llvm/test/CodeGen/AArch64/adc.ll
+++ b/llvm/test/CodeGen/AArch64/adc.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefix=CHECK-LE %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefix=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 | FileCheck --check-prefixes=CHECK-LE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64_be-none-linux-gnu | FileCheck --check-prefixes=CHECK-BE %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=arm64-apple-ios7.0 -global-isel | FileCheck --check-prefixes=CHECK-GI %s
define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-LE-LABEL: test_simple:
@@ -18,11 +19,16 @@ define i128 @test_simple(i128 %a, i128 %b, i128 %c) {
; CHECK-BE-NEXT: subs x1, x8, x5
; CHECK-BE-NEXT: sbc x0, x9, x4
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_simple:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x8, x0, x2
+; CHECK-GI-NEXT: adc x9, x1, x3
+; CHECK-GI-NEXT: subs x0, x8, x4
+; CHECK-GI-NEXT: sbc x1, x9, x5
+; CHECK-GI-NEXT: ret
%valadd = add i128 %a, %b
-
%valsub = sub i128 %valadd, %c
-
ret i128 %valsub
}
@@ -38,9 +44,13 @@ define i128 @test_imm(i128 %a) {
; CHECK-BE-NEXT: adds x1, x1, #12
; CHECK-BE-NEXT: cinc x0, x0, hs
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_imm:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: adds x0, x0, #12
+; CHECK-GI-NEXT: adc x1, x1, xzr
+; CHECK-GI-NEXT: ret
%val = add i128 %a, 12
-
ret i128 %val
}
@@ -58,11 +68,16 @@ define i128 @test_shifted(i128 %a, i128 %b) {
; CHECK-BE-NEXT: adds x1, x1, x3, lsl #45
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_shifted:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: lsr x8, x2, #19
+; CHECK-GI-NEXT: adds x0, x0, x2, lsl #45
+; CHECK-GI-NEXT: orr x8, x8, x3, lsl #45
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%rhs = shl i128 %b, 45
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
@@ -86,11 +101,19 @@ define i128 @test_extended(i128 %a, i16 %b) {
; CHECK-BE-NEXT: extr x8, x9, x8, #61
; CHECK-BE-NEXT: adc x0, x0, x8
; CHECK-BE-NEXT: ret
-
+;
+; CHECK-GI-LABEL: test_extended:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: ; kill: def $w2 killed $w2 def $x2
+; CHECK-GI-NEXT: sxth x8, w2
+; CHECK-GI-NEXT: adds x0, x0, w2, sxth #3
+; CHECK-GI-NEXT: asr x9, x8, #63
+; CHECK-GI-NEXT: lsr x8, x8, #61
+; CHECK-GI-NEXT: orr x8, x8, x9, lsl #3
+; CHECK-GI-NEXT: adc x1, x1, x8
+; CHECK-GI-NEXT: ret
%ext = sext i16 %b to i128
%rhs = shl i128 %ext, 3
-
%val = add i128 %a, %rhs
-
ret i128 %val
}
diff --git a/llvm/test/CodeGen/AArch64/addcarry-crash.ll b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
index be75ab1..b4556c7 100644
--- a/llvm/test/CodeGen/AArch64/addcarry-crash.ll
+++ b/llvm/test/CodeGen/AArch64/addcarry-crash.ll
@@ -1,16 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
target triple = "arm64-apple-ios7.0"
define i64 @foo(ptr nocapture readonly %ptr, i64 %a, i64 %b, i64 %c) local_unnamed_addr #0 {
-; CHECK-LABEL: foo:
-; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: lsr x8, x1, #32
-; CHECK-NEXT: ldr w9, [x0, #4]
-; CHECK-NEXT: cmn x3, x2
-; CHECK-NEXT: umull x8, w9, w8
-; CHECK-NEXT: cinc x0, x8, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: foo:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: lsr x8, x1, #32
+; CHECK-SD-NEXT: ldr w9, [x0, #4]
+; CHECK-SD-NEXT: cmn x3, x2
+; CHECK-SD-NEXT: umull x8, w9, w8
+; CHECK-SD-NEXT: cinc x0, x8, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: foo:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: ldr x8, [x0]
+; CHECK-GI-NEXT: lsr x9, x1, #32
+; CHECK-GI-NEXT: cmn x3, x2
+; CHECK-GI-NEXT: cset w10, hs
+; CHECK-GI-NEXT: lsr x8, x8, #32
+; CHECK-GI-NEXT: and x10, x10, #0x1
+; CHECK-GI-NEXT: umaddl x0, w8, w9, x10
+; CHECK-GI-NEXT: ret
entry:
%0 = lshr i64 %a, 32
%1 = load i64, ptr %ptr, align 8
@@ -24,3 +37,6 @@ entry:
}
attributes #0 = { norecurse nounwind readonly }
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index b325851..78881c8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s -check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
diff --git a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
index 634d1b9..5f5b27a 100644
--- a/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-free-ext-promotion.ll
@@ -59,37 +59,33 @@ bb27: ; preds = %bb9, %bb8
define void @avoid_promotion_2_and(ptr nocapture noundef %arg) {
; CHECK-LABEL: avoid_promotion_2_and:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: add x8, x0, #32
-; CHECK-NEXT: b LBB1_2
-; CHECK-NEXT: LBB1_1: ; %latch
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: cmp w9, #2
-; CHECK-NEXT: add x8, x8, #56
-; CHECK-NEXT: b.ls LBB1_4
-; CHECK-NEXT: LBB1_2: ; %loop
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: add x9, x0, #32
+; CHECK-NEXT: LBB1_1: ; %loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr w9, [x8, #20]
-; CHECK-NEXT: cmp w9, #3
-; CHECK-NEXT: b.lo LBB1_1
-; CHECK-NEXT: ; %bb.3: ; %then
-; CHECK-NEXT: ; in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: ldp w13, w12, [x8, #12]
-; CHECK-NEXT: ldr w10, [x8]
+; CHECK-NEXT: ldr w10, [x9, #20]
+; CHECK-NEXT: cmp w10, #3
+; CHECK-NEXT: b.lo LBB1_3
+; CHECK-NEXT: ; %bb.2: ; %then
+; CHECK-NEXT: ; in Loop: Header=BB1_1 Depth=1
+; CHECK-NEXT: ldp w13, w12, [x9, #12]
+; CHECK-NEXT: ldr w10, [x9]
; CHECK-NEXT: ldr x11, [x0]
-; CHECK-NEXT: ldr w14, [x8, #8]
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: ldr w14, [x9, #8]
; CHECK-NEXT: lsl w10, w10, w13
; CHECK-NEXT: ldrb w11, [x11, x12]
; CHECK-NEXT: eor w10, w10, w11
-; CHECK-NEXT: ldur w11, [x8, #-24]
+; CHECK-NEXT: ldur w11, [x9, #-24]
; CHECK-NEXT: and w10, w10, w14
-; CHECK-NEXT: ldp x14, x13, [x8, #-16]
-; CHECK-NEXT: str w10, [x8]
+; CHECK-NEXT: ldp x14, x13, [x9, #-16]
+; CHECK-NEXT: str w10, [x9], #56
; CHECK-NEXT: and w11, w11, w12
; CHECK-NEXT: ldrh w15, [x13, w10, uxtw #1]
; CHECK-NEXT: strh w15, [x14, w11, uxtw #1]
; CHECK-NEXT: strh w12, [x13, w10, uxtw #1]
; CHECK-NEXT: b LBB1_1
-; CHECK-NEXT: LBB1_4: ; %exit
+; CHECK-NEXT: LBB1_3: ; %exit.critedge
; CHECK-NEXT: ret
entry:
br label %loop
diff --git a/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..c4c54175
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..b47607e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..94b657c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple aarch64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
index 5765e0a..b3ce9d2 100644
--- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
@@ -1,14 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "arm64"
define i1 @test_EQ_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp eq i64 %add, %a
@@ -16,11 +23,19 @@ entry:
}
define i1 @test_EQ_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -55,11 +70,19 @@ entry:
}
define i1 @test_EQ_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_EQ_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -68,11 +91,17 @@ entry:
}
define i1 @test_EQ_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_EQ_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_EQ_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_EQ_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, eq
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp eq i32 %add, %a
@@ -218,11 +247,17 @@ entry:
}
define i1 @test_NE_IllEbT(i64 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IllEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, x1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IllEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, x1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IllEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn x1, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i64 0, %b
%cmp = icmp ne i64 %add, %a
@@ -230,11 +265,19 @@ entry:
}
define i1 @test_NE_IliEbT(i64 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IliEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x0, w1, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IliEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x0, w1, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IliEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w1 killed $w1 def $x1
+; CHECK-GI-NEXT: sxtw x8, w1
+; CHECK-GI-NEXT: cmn x8, x0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %b to i64
%add = sub i64 0, %a
@@ -269,11 +312,19 @@ entry:
}
define i1 @test_NE_IilEbT(i32 %a, i64 %b) {
-; CHECK-LABEL: test_NE_IilEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn x1, w0, sxtw
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IilEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn x1, w0, sxtw
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IilEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT: sxtw x8, w0
+; CHECK-GI-NEXT: cmn x8, x1
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%conv = sext i32 %a to i64
%add = sub i64 0, %b
@@ -282,11 +333,17 @@ entry:
}
define i1 @test_NE_IiiEbT(i32 %a, i32 %b) {
-; CHECK-LABEL: test_NE_IiiEbT:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmn w0, w1
-; CHECK-NEXT: cset w0, ne
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_NE_IiiEbT:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_NE_IiiEbT:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, ne
+; CHECK-GI-NEXT: ret
entry:
%add = sub i32 0, %b
%cmp = icmp ne i32 %add, %a
@@ -444,161 +501,281 @@ define i1 @cmn_large_imm(i32 %a) {
}
define i1 @almost_immediate_neg_slt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_slt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_slt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, le
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_slt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_slt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
%cmp = icmp slt i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, gt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_uge_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_uge_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, hi
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_uge_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_uge_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hs
+; CHECK-GI-NEXT: ret
%cmp = icmp uge i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #4097 // =0x1001
+; CHECK-GI-NEXT: movk w8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i32 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_ult_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ult_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
-; CHECK-NEXT: cset w0, ls
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ult_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ult_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-61439 // =0xffffffffffff1001
+; CHECK-GI-NEXT: movk x8, #65281, lsl #16
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, lo
+; CHECK-GI-NEXT: ret
%cmp = icmp ult i64 %x, -16707583
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sle_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sle_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lt
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sle_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sle_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, le
+; CHECK-GI-NEXT: ret
%cmp = icmp sle i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_sgt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_sgt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, ge
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_sgt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, gt
+; CHECK-GI-NEXT: ret
%cmp = icmp sgt i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ule_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ule_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, lo
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ule_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ule_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, ls
+; CHECK-GI-NEXT: ret
%cmp = icmp ule i64 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt(i32 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #-16773121 // =0xff000fff
+; CHECK-GI-NEXT: cmp w0, w8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i32 %x, -16773121
ret i1 %cmp
}
define i1 @almost_immediate_neg_ugt_64(i64 %x) {
-; CHECK-LABEL: almost_immediate_neg_ugt_64:
-; CHECK: // %bb.0:
-; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
-; CHECK-NEXT: cset w0, hs
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: almost_immediate_neg_ugt_64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov x8, #-16773121 // =0xffffffffff000fff
+; CHECK-GI-NEXT: cmp x0, x8
+; CHECK-GI-NEXT: cset w0, hi
+; CHECK-GI-NEXT: ret
%cmp = icmp ugt i64 %x, -16773121
ret i1 %cmp
}
@@ -637,6 +814,24 @@ define i1 @cmn_nsw_neg(i32 %a, i32 %b) {
ret i1 %cmp
}
+define i1 @cmn_swap(i32 %a, i32 %b) {
+; CHECK-SD-LABEL: cmn_swap:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmn w0, w1
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmn_swap:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmn w1, w0
+; CHECK-GI-NEXT: cset w0, lt
+; CHECK-GI-NEXT: ret
+ %sub = sub nsw i32 0, %b
+ %cmp = icmp sgt i32 %sub, %a
+ ret i1 %cmp
+}
+
+
define i1 @cmn_nsw_neg_64(i64 %a, i64 %b) {
; CHECK-LABEL: cmn_nsw_neg_64:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
index 880bd29..d67aa08 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-predicated-scalable.ll
@@ -14,20 +14,19 @@ target triple = "aarch64"
define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: mov w8, #100 // =0x64
-; CHECK-NEXT: cntd x9
; CHECK-NEXT: whilelo p1.d, xzr, x8
+; CHECK-NEXT: cntd x9
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: mov x11, x9
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
+; CHECK-NEXT: mov x11, x9
; CHECK-NEXT: .LBB0_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: zip2 p2.d, p1.d, p1.d
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: zip1 p1.d, p1.d, p1.d
; CHECK-NEXT: ld1d { z2.d }, p2/z, [x0, #1, mul vl]
; CHECK-NEXT: ld1d { z4.d }, p2/z, [x1, #1, mul vl]
@@ -39,14 +38,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: whilelo p1.d, x11, x8
; CHECK-NEXT: add x11, x11, x9
; CHECK-NEXT: b.mi .LBB0_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -111,21 +110,20 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntd x9
-; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: neg x10, x9
+; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: and x10, x10, x11
; CHECK-NEXT: rdvl x11, #2
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB1_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p0/z, [x2, x8, lsl #2]
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: add x8, x8, x9
; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0
; CHECK-NEXT: cmp x10, x8
@@ -141,12 +139,12 @@ define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: b.ne .LBB1_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -213,21 +211,20 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_x2_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: mov w8, #100 // =0x64
-; CHECK-NEXT: cntd x9
; CHECK-NEXT: whilelo p1.d, xzr, x8
+; CHECK-NEXT: cntd x9
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: cnth x11
; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: cnth x11
; CHECK-NEXT: mov x12, x9
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB2_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p1/z, [x2]
-; CHECK-NEXT: mov z6.d, z1.d
-; CHECK-NEXT: mov z7.d, z0.d
+; CHECK-NEXT: mov z6.d, z0.d
+; CHECK-NEXT: mov z7.d, z1.d
; CHECK-NEXT: add x2, x2, x11
; CHECK-NEXT: and z2.d, z2.d, #0xffffffff
; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
@@ -243,14 +240,14 @@ define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, pt
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: mov z0.d, p2/m, z7.d
-; CHECK-NEXT: mov z1.d, p1/m, z6.d
+; CHECK-NEXT: mov z1.d, p2/m, z7.d
+; CHECK-NEXT: mov z0.d, p1/m, z6.d
; CHECK-NEXT: whilelo p1.d, x12, x8
; CHECK-NEXT: add x12, x12, x9
; CHECK-NEXT: b.mi .LBB2_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
index 29be231..0646ca4 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-reductions-scalable.ll
@@ -14,15 +14,14 @@ target triple = "aarch64"
define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntd x8
-; CHECK-NEXT: mov w10, #100 // =0x64
; CHECK-NEXT: neg x9, x8
+; CHECK-NEXT: mov w10, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and x9, x9, x10
; CHECK-NEXT: rdvl x10, #2
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB0_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z2, [x0, #1, mul vl]
@@ -32,14 +31,14 @@ define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-NEXT: ldr z5, [x1]
; CHECK-NEXT: add x1, x1, x10
; CHECK-NEXT: add x0, x0, x10
-; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #0
-; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #0
-; CHECK-NEXT: fcmla z1.d, p0/m, z5.d, z3.d, #90
-; CHECK-NEXT: fcmla z0.d, p0/m, z4.d, z2.d, #90
+; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #0
+; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #0
+; CHECK-NEXT: fcmla z0.d, p0/m, z5.d, z3.d, #90
+; CHECK-NEXT: fcmla z1.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: b.ne .LBB0_1
; CHECK-NEXT: // %bb.2: // %exit.block
-; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
-; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z2.d, z0.d, z1.d
+; CHECK-NEXT: uzp2 z1.d, z0.d, z1.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
@@ -183,17 +182,16 @@ exit.block: ; preds = %vector.body
define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64_unrolled:
; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: cntw x8
-; CHECK-NEXT: mov w10, #1000 // =0x3e8
+; CHECK-NEXT: movi v2.2d, #0000000000000000
+; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: neg x9, x8
+; CHECK-NEXT: mov w10, #1000 // =0x3e8
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and x9, x9, x10
; CHECK-NEXT: rdvl x10, #4
-; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
-; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
-; CHECK-NEXT: mov z2.d, z1.d
-; CHECK-NEXT: mov z3.d, z0.d
; CHECK-NEXT: .LBB2_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z4, [x0, #1, mul vl]
@@ -207,20 +205,20 @@ define %"class.std::complex" @complex_mul_v2f64_unrolled(ptr %a, ptr %b) {
; CHECK-NEXT: ldr z18, [x1, #3, mul vl]
; CHECK-NEXT: ldr z19, [x1, #2, mul vl]
; CHECK-NEXT: add x1, x1, x10
-; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #0
-; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #0
+; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #0
+; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #0
; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #0
; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #0
-; CHECK-NEXT: fcmla z1.d, p0/m, z16.d, z5.d, #90
-; CHECK-NEXT: fcmla z0.d, p0/m, z7.d, z4.d, #90
+; CHECK-NEXT: fcmla z0.d, p0/m, z16.d, z5.d, #90
+; CHECK-NEXT: fcmla z1.d, p0/m, z7.d, z4.d, #90
; CHECK-NEXT: fcmla z3.d, p0/m, z18.d, z6.d, #90
; CHECK-NEXT: fcmla z2.d, p0/m, z19.d, z17.d, #90
; CHECK-NEXT: b.ne .LBB2_1
; CHECK-NEXT: // %bb.2: // %exit.block
; CHECK-NEXT: uzp1 z4.d, z2.d, z3.d
-; CHECK-NEXT: uzp1 z5.d, z1.d, z0.d
+; CHECK-NEXT: uzp1 z5.d, z0.d, z1.d
; CHECK-NEXT: uzp2 z2.d, z2.d, z3.d
-; CHECK-NEXT: uzp2 z0.d, z1.d, z0.d
+; CHECK-NEXT: uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT: fadd z1.d, z4.d, z5.d
; CHECK-NEXT: fadd z2.d, z2.d, z0.d
; CHECK-NEXT: faddv d0, p0, z1.d
@@ -310,15 +308,15 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
; CHECK-LABEL: reduction_mix:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: movi v2.2d, #0000000000000000
+; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: cntd x9
-; CHECK-NEXT: mov w11, #100 // =0x64
+; CHECK-NEXT: movi v1.2d, #0000000000000000
; CHECK-NEXT: neg x10, x9
+; CHECK-NEXT: mov w11, #100 // =0x64
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: and x10, x10, x11
; CHECK-NEXT: rdvl x11, #2
-; CHECK-NEXT: zip2 z0.d, z2.d, z2.d
-; CHECK-NEXT: zip1 z1.d, z2.d, z2.d
; CHECK-NEXT: .LBB3_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr z3, [x0]
@@ -327,13 +325,13 @@ define dso_local %"class.std::complex" @reduction_mix(ptr %a, ptr %b, ptr noalia
; CHECK-NEXT: ld1w { z5.d }, p0/z, [x3, x8, lsl #2]
; CHECK-NEXT: add x8, x8, x9
; CHECK-NEXT: cmp x10, x8
-; CHECK-NEXT: fadd z0.d, z4.d, z0.d
-; CHECK-NEXT: fadd z1.d, z3.d, z1.d
+; CHECK-NEXT: fadd z1.d, z4.d, z1.d
+; CHECK-NEXT: fadd z0.d, z3.d, z0.d
; CHECK-NEXT: add z2.d, z5.d, z2.d
; CHECK-NEXT: b.ne .LBB3_1
; CHECK-NEXT: // %bb.2: // %middle.block
-; CHECK-NEXT: uzp2 z3.d, z1.d, z0.d
-; CHECK-NEXT: uzp1 z1.d, z1.d, z0.d
+; CHECK-NEXT: uzp2 z3.d, z0.d, z1.d
+; CHECK-NEXT: uzp1 z1.d, z0.d, z1.d
; CHECK-NEXT: uaddv d2, p0, z2.d
; CHECK-NEXT: faddv d0, p0, z3.d
; CHECK-NEXT: faddv d1, p0, z1.d
diff --git a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
index a9618fd..05ecc9e 100644
--- a/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/fixed-vector-interleave.ll
@@ -131,18 +131,83 @@ define <4 x i64> @interleave2_v4i64(<2 x i64> %vec0, <2 x i64> %vec1) {
ret <4 x i64> %retval
}
+define <4 x i16> @interleave2_same_const_splat_v4i16() {
+; CHECK-SD-LABEL: interleave2_same_const_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: movi v0.4h, #3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_same_const_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: ret
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 3))
+ ret <4 x i16> %retval
+}
+
+define <4 x i16> @interleave2_diff_const_splat_v4i16() {
+; CHECK-SD-LABEL: interleave2_diff_const_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: adrp x8, .LCPI11_0
+; CHECK-SD-NEXT: ldr d0, [x8, :lo12:.LCPI11_0]
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_diff_const_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #3 // =0x3
+; CHECK-GI-NEXT: mov w9, #4 // =0x4
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov v0.h[1], w8
+; CHECK-GI-NEXT: mov v1.h[1], w9
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 4))
+ ret <4 x i16> %retval
+}
-; Float declarations
-declare <4 x half> @llvm.vector.interleave2.v4f16(<2 x half>, <2 x half>)
-declare <8 x half> @llvm.vector.interleave2.v8f16(<4 x half>, <4 x half>)
-declare <16 x half> @llvm.vector.interleave2.v16f16(<8 x half>, <8 x half>)
-declare <4 x float> @llvm.vector.interleave2.v4f32(<2 x float>, <2 x float>)
-declare <8 x float> @llvm.vector.interleave2.v8f32(<4 x float>, <4 x float>)
-declare <4 x double> @llvm.vector.interleave2.v4f64(<2 x double>, <2 x double>)
-
-; Integer declarations
-declare <32 x i8> @llvm.vector.interleave2.v32i8(<16 x i8>, <16 x i8>)
-declare <16 x i16> @llvm.vector.interleave2.v16i16(<8 x i16>, <8 x i16>)
-declare <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32>, <4 x i32>)
-declare <4 x i64> @llvm.vector.interleave2.v4i64(<2 x i64>, <2 x i64>)
+define <4 x i16> @interleave2_same_nonconst_splat_v4i16(i16 %a) {
+; CHECK-SD-LABEL: interleave2_same_nonconst_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: dup v0.4h, w0
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_same_nonconst_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: dup v0.4h, w0
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v0.4h
+; CHECK-GI-NEXT: ret
+ %ins = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <2 x i16> %ins, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat, <2 x i16> %splat)
+ ret <4 x i16> %retval
+}
+
+define <4 x i16> @interleave2_diff_nonconst_splat_v4i16(i16 %a, i16 %b) {
+; CHECK-SD-LABEL: interleave2_diff_nonconst_splat_v4i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: mov v0.h[1], w0
+; CHECK-SD-NEXT: mov v0.h[2], w1
+; CHECK-SD-NEXT: mov v0.h[3], w1
+; CHECK-SD-NEXT: rev32 v1.4h, v0.4h
+; CHECK-SD-NEXT: uzp1 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: interleave2_diff_nonconst_splat_v4i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: dup v0.4h, w0
+; CHECK-GI-NEXT: dup v1.4h, w1
+; CHECK-GI-NEXT: zip1 v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: ret
+ %ins1 = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <2 x i16> %ins1, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %ins2 = insertelement <2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <2 x i16> %ins2, <2 x i16> poison, <2 x i32> <i32 0, i32 0>
+ %retval = call <4 x i16> @llvm.vector.interleave2.v4i16(<2 x i16> %splat1, <2 x i16> %splat2)
+ ret <4 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
index c4a027c..381904f 100644
--- a/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
+++ b/llvm/test/CodeGen/AArch64/late-taildup-computed-goto.ll
@@ -25,77 +25,58 @@ define void @test_interp(ptr %frame, ptr %dst) {
; CHECK-NEXT: adrp x21, _opcode.targets@PAGE
; CHECK-NEXT: Lloh1:
; CHECK-NEXT: add x21, x21, _opcode.targets@PAGEOFF
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov x24, xzr
; CHECK-NEXT: add x8, x21, xzr, lsl #3
; CHECK-NEXT: mov x19, x1
; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: add x23, x22, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: mov w22, #1 ; =0x1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp0: ; Block address taken
; CHECK-NEXT: LBB0_1: ; %loop.header
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: mov x22, xzr
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: Ltmp1: ; Block address taken
; CHECK-NEXT: LBB0_2: ; %op1.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: str xzr, [x19]
-; CHECK-NEXT: mov w8, #1 ; =0x1
+; CHECK-NEXT: Ltmp2: ; Block address taken
+; CHECK-NEXT: LBB0_3: ; %op6.bb
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
+; CHECK-NEXT: ldr x8, [x0, #8]
+; CHECK-NEXT: str x22, [x0]
+; CHECK-NEXT: ldr x8, [x8, #48]
; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x8, x21, x24, lsl #3
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp2: ; Block address taken
-; CHECK-NEXT: LBB0_3: ; %op2.bb
+; CHECK-NEXT: Ltmp3: ; Block address taken
+; CHECK-NEXT: LBB0_4: ; %op2.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: add x8, x21, x23, lsl #3
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: mov x20, xzr
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
-; CHECK-NEXT: Ltmp3: ; Block address taken
-; CHECK-NEXT: LBB0_4: ; %op4.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
-; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
-; CHECK-NEXT: stp x8, x9, [x20, #-8]
-; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
; CHECK-NEXT: Ltmp4: ; Block address taken
-; CHECK-NEXT: LBB0_5: ; %op5.bb
+; CHECK-NEXT: LBB0_5: ; %op4.bb
+; CHECK-NEXT: Ltmp5: ; Block address taken
+; CHECK-NEXT: LBB0_6: ; %op5.bb
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str x22, [x19]
-; CHECK-NEXT: add x10, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
-; CHECK-NEXT: ldur x8, [x22, #12]
+; CHECK-NEXT: str x23, [x19]
+; CHECK-NEXT: ldur x8, [x23, #12]
; CHECK-NEXT: ldur x9, [x20, #-8]
-; CHECK-NEXT: add x22, x22, #20
+; CHECK-NEXT: add x23, x23, #20
; CHECK-NEXT: stp x8, x9, [x20, #-8]
+; CHECK-NEXT: add x8, x21, x24, lsl #3
; CHECK-NEXT: add x20, x20, #8
-; CHECK-NEXT: br x10
-; CHECK-NEXT: Ltmp5: ; Block address taken
-; CHECK-NEXT: LBB0_6: ; %op6.bb
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ldr x0, [x20, #-8]!
-; CHECK-NEXT: mov w8, #1 ; =0x1
-; CHECK-NEXT: ldr x9, [x0, #8]
-; CHECK-NEXT: str x8, [x0]
-; CHECK-NEXT: ldr x8, [x9, #48]
-; CHECK-NEXT: blr x8
-; CHECK-NEXT: add x8, x21, x23, lsl #3
-; CHECK-NEXT: add x23, x23, #1
+; CHECK-NEXT: add x24, x24, #1
; CHECK-NEXT: br x8
; CHECK-NEXT: .loh AdrpAdd Lloh0, Lloh1
entry:
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 4f0c408..048e988 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -28,46 +28,28 @@ define i32 @test_udot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_udot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: uxtb w11, w11
-; CHECK-GI-NEXT: uxtb w13, w13
-; CHECK-GI-NEXT: uxtb w8, w8
-; CHECK-GI-NEXT: uxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: uxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: ushll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: ushll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: uaddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -128,46 +110,28 @@ define i32 @test_sdot_v4i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
;
; CHECK-GI-LABEL: test_sdot_v4i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ldr w8, [x0]
-; CHECK-GI-NEXT: ldr w9, [x1]
+; CHECK-GI-NEXT: ldr w8, [x1]
+; CHECK-GI-NEXT: ldr w9, [x0]
; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov b1, v0.b[1]
-; CHECK-GI-NEXT: mov b3, v0.b[2]
-; CHECK-GI-NEXT: mov b5, v2.b[2]
-; CHECK-GI-NEXT: mov b4, v0.b[3]
-; CHECK-GI-NEXT: mov b0, v2.b[1]
-; CHECK-GI-NEXT: mov b6, v2.b[3]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: fmov w10, s1
-; CHECK-GI-NEXT: fmov w11, s3
-; CHECK-GI-NEXT: fmov s1, w8
-; CHECK-GI-NEXT: fmov w13, s5
-; CHECK-GI-NEXT: fmov w8, s4
-; CHECK-GI-NEXT: fmov w12, s0
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v1.h[1], w10
-; CHECK-GI-NEXT: fmov w10, s6
-; CHECK-GI-NEXT: fmov s0, w11
-; CHECK-GI-NEXT: fmov s3, w13
-; CHECK-GI-NEXT: mov v2.h[1], w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.h[1], w8
-; CHECK-GI-NEXT: sshll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: mov v3.h[1], w10
-; CHECK-GI-NEXT: sshll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: sshll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: mov v1.d[1], v0.d[0]
-; CHECK-GI-NEXT: mov v2.d[1], v3.d[0]
-; CHECK-GI-NEXT: mul v0.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov v3.b[0], v0.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[1]
+; CHECK-GI-NEXT: mov v5.b[0], v1.b[0]
+; CHECK-GI-NEXT: mov v3.b[1], v2.b[0]
+; CHECK-GI-NEXT: mov b2, v0.b[2]
+; CHECK-GI-NEXT: mov b0, v0.b[3]
+; CHECK-GI-NEXT: mov v5.b[1], v4.b[0]
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b1, v1.b[3]
+; CHECK-GI-NEXT: mov v3.b[2], v2.b[0]
+; CHECK-GI-NEXT: mov v5.b[2], v4.b[0]
+; CHECK-GI-NEXT: mov v3.b[3], v0.b[0]
+; CHECK-GI-NEXT: mov v5.b[3], v1.b[0]
+; CHECK-GI-NEXT: sshll v0.8h, v3.8b, #0
+; CHECK-GI-NEXT: sshll v1.8h, v5.8b, #0
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -205,22 +169,18 @@ define i32 @test_sdot_v4i8_double(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8
;
; CHECK-GI-LABEL: test_sdot_v4i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v1.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-GI-NEXT: shl v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: shl v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: shl v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: shl v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: sshr v0.4s, v0.4s, #24
-; CHECK-GI-NEXT: sshr v1.4s, v1.4s, #24
-; CHECK-GI-NEXT: sshr v2.4s, v2.4s, #24
-; CHECK-GI-NEXT: sshr v3.4s, v3.4s, #24
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: mul v1.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v0.4s
-; CHECK-GI-NEXT: addv s1, v1.4s
+; CHECK-GI-NEXT: shl v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: shl v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: shl v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: shl v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: sshr v0.4h, v0.4h, #8
+; CHECK-GI-NEXT: sshr v1.4h, v1.4h, #8
+; CHECK-GI-NEXT: sshr v2.4h, v2.4h, #8
+; CHECK-GI-NEXT: sshr v3.4h, v3.4h, #8
+; CHECK-GI-NEXT: mul v0.4h, v0.4h, v1.4h
+; CHECK-GI-NEXT: mul v1.4h, v2.4h, v3.4h
+; CHECK-GI-NEXT: saddlv s0, v0.4h
+; CHECK-GI-NEXT: saddlv s1, v1.4h
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -414,31 +374,60 @@ define i32 @test_udot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: umov w8, v1.b[4]
-; CHECK-GI-NEXT: umov w9, v0.b[4]
-; CHECK-GI-NEXT: umov w10, v1.b[0]
-; CHECK-GI-NEXT: umov w12, v0.b[0]
-; CHECK-GI-NEXT: umov w11, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: umov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: umov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: umov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: umov w8, v0.h[0]
+; CHECK-GI-NEXT: umov w9, v0.h[4]
+; CHECK-GI-NEXT: umov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: umov w8, v0.h[2]
+; CHECK-GI-NEXT: umov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -511,31 +500,60 @@ define i32 @test_sdot_v5i8(ptr nocapture readonly %a, ptr nocapture readonly %b,
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: ldr d0, [x0]
; CHECK-GI-NEXT: ldr d1, [x1]
-; CHECK-GI-NEXT: smov w8, v1.b[4]
-; CHECK-GI-NEXT: smov w9, v0.b[4]
-; CHECK-GI-NEXT: smov w10, v1.b[0]
-; CHECK-GI-NEXT: smov w12, v0.b[0]
-; CHECK-GI-NEXT: smov w11, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v0.b[1]
-; CHECK-GI-NEXT: mul w8, w8, w9
-; CHECK-GI-NEXT: fmov s2, w10
-; CHECK-GI-NEXT: smov w9, v1.b[2]
-; CHECK-GI-NEXT: fmov s3, w12
-; CHECK-GI-NEXT: smov w10, v1.b[3]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: mov v3.s[1], w13
-; CHECK-GI-NEXT: smov w11, v0.b[3]
-; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w9
-; CHECK-GI-NEXT: mov v3.s[2], w8
-; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: mov v3.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v3.4s
-; CHECK-GI-NEXT: addv s0, v4.4s
+; CHECK-GI-NEXT: mov b2, v0.b[1]
+; CHECK-GI-NEXT: mov b3, v1.b[1]
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: mov b4, v1.b[2]
+; CHECK-GI-NEXT: mov b5, v0.b[2]
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b0, v0.b[4]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b1, v1.b[4]
+; CHECK-GI-NEXT: fmov w10, s3
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w11, s2
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov w8, s4
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: mov v3.h[1], w11
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[2], w8
+; CHECK-GI-NEXT: mov v3.h[2], w9
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov w9, s6
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mul v0.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w10, v0.h[1]
+; CHECK-GI-NEXT: fmov s1, w8
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v0.h[3]
+; CHECK-GI-NEXT: mov v1.s[1], w10
+; CHECK-GI-NEXT: mov v2.s[1], wzr
+; CHECK-GI-NEXT: mov v1.s[2], w8
+; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v1.s[3], w9
+; CHECK-GI-NEXT: mov v2.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: add w0, w8, w2
; CHECK-GI-NEXT: ret
@@ -571,59 +589,117 @@ define i32 @test_sdot_v5i8_double(<5 x i8> %a, <5 x i8> %b, <5 x i8> %c, <5 x i8
; CHECK-GI-LABEL: test_sdot_v5i8_double:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT: mov b17, v0.b[1]
+; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: // kill: def $d1 killed $d1 def $q1
; CHECK-GI-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-GI-NEXT: // kill: def $d3 killed $d3 def $q3
-; CHECK-GI-NEXT: smov w9, v1.b[0]
-; CHECK-GI-NEXT: smov w10, v0.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[4]
-; CHECK-GI-NEXT: smov w12, v2.b[0]
-; CHECK-GI-NEXT: smov w13, v2.b[4]
-; CHECK-GI-NEXT: smov w14, v3.b[4]
-; CHECK-GI-NEXT: smov w8, v0.b[0]
-; CHECK-GI-NEXT: smov w16, v3.b[0]
-; CHECK-GI-NEXT: smov w15, v0.b[1]
-; CHECK-GI-NEXT: fmov s5, w9
-; CHECK-GI-NEXT: mul w9, w10, w11
-; CHECK-GI-NEXT: smov w10, v1.b[1]
-; CHECK-GI-NEXT: fmov s6, w12
-; CHECK-GI-NEXT: mul w12, w13, w14
-; CHECK-GI-NEXT: smov w11, v2.b[1]
-; CHECK-GI-NEXT: smov w13, v3.b[1]
-; CHECK-GI-NEXT: fmov s4, w8
-; CHECK-GI-NEXT: fmov s7, w16
-; CHECK-GI-NEXT: fmov s16, w9
-; CHECK-GI-NEXT: smov w8, v0.b[2]
-; CHECK-GI-NEXT: smov w14, v1.b[2]
-; CHECK-GI-NEXT: fmov s17, w12
-; CHECK-GI-NEXT: smov w9, v3.b[2]
-; CHECK-GI-NEXT: mov v5.s[1], w10
-; CHECK-GI-NEXT: mov v4.s[1], w15
-; CHECK-GI-NEXT: smov w15, v2.b[2]
-; CHECK-GI-NEXT: mov v6.s[1], w11
-; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w13
-; CHECK-GI-NEXT: smov w10, v0.b[3]
-; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: smov w11, v1.b[3]
-; CHECK-GI-NEXT: smov w12, v2.b[3]
-; CHECK-GI-NEXT: smov w13, v3.b[3]
-; CHECK-GI-NEXT: mov v5.s[2], w14
-; CHECK-GI-NEXT: mov v4.s[2], w8
-; CHECK-GI-NEXT: mov v6.s[2], w15
-; CHECK-GI-NEXT: mov v16.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], w9
-; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w11
-; CHECK-GI-NEXT: mov v4.s[3], w10
-; CHECK-GI-NEXT: mov v6.s[3], w12
-; CHECK-GI-NEXT: mov v16.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], w13
-; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: mla v16.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mla v17.4s, v6.4s, v7.4s
-; CHECK-GI-NEXT: addv s0, v16.4s
-; CHECK-GI-NEXT: addv s1, v17.4s
+; CHECK-GI-NEXT: fmov w11, s1
+; CHECK-GI-NEXT: mov b25, v1.b[1]
+; CHECK-GI-NEXT: mov b16, v1.b[2]
+; CHECK-GI-NEXT: mov b7, v1.b[3]
+; CHECK-GI-NEXT: mov b5, v1.b[4]
+; CHECK-GI-NEXT: mov b22, v2.b[1]
+; CHECK-GI-NEXT: mov b23, v3.b[1]
+; CHECK-GI-NEXT: sxtb w9, w8
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b24, v0.b[2]
+; CHECK-GI-NEXT: fmov w8, s17
+; CHECK-GI-NEXT: mov b6, v0.b[3]
+; CHECK-GI-NEXT: mov b4, v0.b[4]
+; CHECK-GI-NEXT: fmov s1, w9
+; CHECK-GI-NEXT: mov b18, v2.b[2]
+; CHECK-GI-NEXT: mov b19, v2.b[3]
+; CHECK-GI-NEXT: mov b0, v2.b[4]
+; CHECK-GI-NEXT: fmov w9, s25
+; CHECK-GI-NEXT: fmov w12, s22
+; CHECK-GI-NEXT: sxtb w10, w8
+; CHECK-GI-NEXT: mov b21, v3.b[2]
+; CHECK-GI-NEXT: fmov w13, s23
+; CHECK-GI-NEXT: mov b20, v3.b[3]
+; CHECK-GI-NEXT: mov b17, v3.b[4]
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: mov v1.h[1], w10
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov s2, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w11, s3
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v1.h[2], w8
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: fmov s3, w10
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s22, w11
+; CHECK-GI-NEXT: fmov w11, s21
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w12
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: mov v22.h[1], w13
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: fmov w12, s6
+; CHECK-GI-NEXT: mov v3.h[2], w10
+; CHECK-GI-NEXT: fmov w10, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: fmov w11, s4
+; CHECK-GI-NEXT: mov v2.h[3], w8
+; CHECK-GI-NEXT: fmov w8, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v1.h[3], w12
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: fmov w9, s0
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v1.h[4], w11
+; CHECK-GI-NEXT: mov v2.h[4], w8
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v3.h[4], w9
+; CHECK-GI-NEXT: mov v22.h[4], w10
+; CHECK-GI-NEXT: mul v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT: mul v1.8h, v3.8h, v22.8h
+; CHECK-GI-NEXT: smov w8, v0.h[0]
+; CHECK-GI-NEXT: smov w9, v0.h[4]
+; CHECK-GI-NEXT: smov w11, v0.h[1]
+; CHECK-GI-NEXT: smov w10, v1.h[0]
+; CHECK-GI-NEXT: smov w12, v1.h[4]
+; CHECK-GI-NEXT: smov w13, v1.h[1]
+; CHECK-GI-NEXT: fmov s2, w8
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: smov w8, v0.h[2]
+; CHECK-GI-NEXT: smov w9, v1.h[2]
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: fmov s5, w12
+; CHECK-GI-NEXT: mov v2.s[1], w11
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: smov w10, v0.h[3]
+; CHECK-GI-NEXT: smov w11, v1.h[3]
+; CHECK-GI-NEXT: mov v4.s[1], w13
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v2.s[2], w8
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v4.s[2], w9
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: mov v2.s[3], w10
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v4.s[3], w11
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: add v0.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: addv s0, v0.4s
+; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: fmov w8, s0
; CHECK-GI-NEXT: fmov w9, s1
; CHECK-GI-NEXT: add w0, w8, w9
@@ -2303,11 +2379,14 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2316,132 +2395,282 @@ define i32 @test_udot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: umov w9, v1.b[4]
-; CHECK-GI-NEXT: umov w11, v1.b[5]
-; CHECK-GI-NEXT: umov w18, v1.b[0]
-; CHECK-GI-NEXT: umov w0, v1.b[12]
-; CHECK-GI-NEXT: umov w3, v7.b[4]
-; CHECK-GI-NEXT: umov w12, v1.b[1]
-; CHECK-GI-NEXT: umov w13, v1.b[6]
-; CHECK-GI-NEXT: umov w1, v1.b[13]
-; CHECK-GI-NEXT: umov w4, v7.b[5]
-; CHECK-GI-NEXT: umov w15, v1.b[2]
-; CHECK-GI-NEXT: umov w8, v1.b[3]
-; CHECK-GI-NEXT: umov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: umov w14, v1.b[8]
-; CHECK-GI-NEXT: umov w17, v1.b[9]
-; CHECK-GI-NEXT: umov w10, v1.b[10]
-; CHECK-GI-NEXT: umov w9, v1.b[11]
-; CHECK-GI-NEXT: umov w5, v1.b[14]
-; CHECK-GI-NEXT: umov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: umov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: umov w7, v7.b[1]
-; CHECK-GI-NEXT: umov w18, v7.b[6]
-; CHECK-GI-NEXT: umov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: umov w12, v7.b[3]
-; CHECK-GI-NEXT: umov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: umov w13, v7.b[2]
-; CHECK-GI-NEXT: umov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: umov w23, v16.b[12]
-; CHECK-GI-NEXT: umov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: umov w6, v16.b[0]
-; CHECK-GI-NEXT: umov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: umov w16, v16.b[2]
-; CHECK-GI-NEXT: umov w15, v16.b[3]
-; CHECK-GI-NEXT: umov w22, v16.b[5]
-; CHECK-GI-NEXT: umov w5, v16.b[6]
-; CHECK-GI-NEXT: umov w18, v16.b[7]
-; CHECK-GI-NEXT: umov w19, v16.b[8]
-; CHECK-GI-NEXT: umov w7, v16.b[9]
-; CHECK-GI-NEXT: umov w24, v16.b[13]
-; CHECK-GI-NEXT: umov w1, v16.b[10]
-; CHECK-GI-NEXT: umov w17, v16.b[11]
-; CHECK-GI-NEXT: umov w20, v16.b[14]
-; CHECK-GI-NEXT: umov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: umov w21, v3.b[8]
-; CHECK-GI-NEXT: umov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: umov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: umov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: umov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: umov w24, v3.b[2]
-; CHECK-GI-NEXT: umov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: umov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b4, v2.b[1]
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[10]
+; CHECK-GI-NEXT: mov b22, v2.b[11]
+; CHECK-GI-NEXT: fmov w7, s2
+; CHECK-GI-NEXT: fmov w13, s6
+; CHECK-GI-NEXT: mov b6, v2.b[12]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: mov b7, v2.b[13]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: mov b16, v2.b[14]
+; CHECK-GI-NEXT: mov b23, v2.b[15]
+; CHECK-GI-NEXT: ldp q3, q2, [x0]
+; CHECK-GI-NEXT: fmov w26, s19
+; CHECK-GI-NEXT: fmov w19, s4
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov b4, v3.b[2]
+; CHECK-GI-NEXT: mov b5, v3.b[1]
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v1.b[1]
+; CHECK-GI-NEXT: fmov w22, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov b18, v1.b[2]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: uxtb w24, w24
+; CHECK-GI-NEXT: mov b19, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s6
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: mov b21, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s7
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b7, v1.b[5]
+; CHECK-GI-NEXT: mov b6, v3.b[3]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w12, s23
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: mov b23, v1.b[7]
+; CHECK-GI-NEXT: mov b20, v3.b[4]
+; CHECK-GI-NEXT: fmov w28, s4
+; CHECK-GI-NEXT: fmov s4, w26
+; CHECK-GI-NEXT: fmov w14, s16
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: fmov w5, s18
+; CHECK-GI-NEXT: uxtb w12, w12
+; CHECK-GI-NEXT: fmov w4, s19
+; CHECK-GI-NEXT: mov b19, v3.b[5]
+; CHECK-GI-NEXT: uxtb w28, w28
+; CHECK-GI-NEXT: fmov w3, s21
+; CHECK-GI-NEXT: mov b18, v3.b[6]
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w1, s7
+; CHECK-GI-NEXT: mov b16, v3.b[7]
+; CHECK-GI-NEXT: fmov w0, s22
+; CHECK-GI-NEXT: mov b17, v3.b[8]
+; CHECK-GI-NEXT: fmov w17, s23
+; CHECK-GI-NEXT: mov b7, v3.b[9]
+; CHECK-GI-NEXT: fmov w30, s5
+; CHECK-GI-NEXT: mov b5, v3.b[10]
+; CHECK-GI-NEXT: mov b21, v3.b[11]
+; CHECK-GI-NEXT: fmov w25, s6
+; CHECK-GI-NEXT: mov b6, v3.b[12]
+; CHECK-GI-NEXT: fmov w23, s20
+; CHECK-GI-NEXT: mov b20, v3.b[13]
+; CHECK-GI-NEXT: mov b22, v3.b[14]
+; CHECK-GI-NEXT: fmov w6, s3
+; CHECK-GI-NEXT: mov b23, v3.b[15]
+; CHECK-GI-NEXT: fmov s3, w8
+; CHECK-GI-NEXT: fmov w8, s1
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w21, s19
+; CHECK-GI-NEXT: mov b19, v2.b[1]
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: fmov w24, s6
+; CHECK-GI-NEXT: fmov w7, s16
+; CHECK-GI-NEXT: mov b16, v2.b[2]
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov v3.h[1], w19
+; CHECK-GI-NEXT: uxtb w19, w29
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: fmov w29, s5
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: uxtb w22, w6
+; CHECK-GI-NEXT: fmov s5, w8
+; CHECK-GI-NEXT: fmov w10, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: fmov w20, s18
+; CHECK-GI-NEXT: uxtb w29, w29
+; CHECK-GI-NEXT: fmov s6, w22
+; CHECK-GI-NEXT: fmov w22, s2
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v5.h[1], w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v2.b[3]
+; CHECK-GI-NEXT: mov v3.h[2], w19
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov v6.h[1], w27
+; CHECK-GI-NEXT: fmov w27, s19
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: fmov s16, w22
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: uxtb w5, w25
+; CHECK-GI-NEXT: uxtb w27, w27
+; CHECK-GI-NEXT: fmov w10, s18
+; CHECK-GI-NEXT: mov v3.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w4
+; CHECK-GI-NEXT: mov v6.h[2], w28
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: mov v16.h[1], w27
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov b20, v2.b[5]
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov w22, s17
+; CHECK-GI-NEXT: mov v5.h[3], w13
+; CHECK-GI-NEXT: uxtb w13, w2
+; CHECK-GI-NEXT: mov v6.h[3], w5
+; CHECK-GI-NEXT: mov b21, v2.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w18
+; CHECK-GI-NEXT: uxtb w18, w23
+; CHECK-GI-NEXT: mov v3.h[4], w13
+; CHECK-GI-NEXT: uxtb w13, w24
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w3
+; CHECK-GI-NEXT: mov v6.h[4], w18
+; CHECK-GI-NEXT: ldr w18, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w16
+; CHECK-GI-NEXT: uxtb w16, w22
+; CHECK-GI-NEXT: mov v5.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w15
+; CHECK-GI-NEXT: uxtb w18, w18
+; CHECK-GI-NEXT: mov v4.h[4], w9
+; CHECK-GI-NEXT: uxtb w9, w21
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[4], w16
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w1
+; CHECK-GI-NEXT: mov v3.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: fmov w19, s22
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: mov v5.h[5], w9
+; CHECK-GI-NEXT: uxtb w9, w0
+; CHECK-GI-NEXT: mov v4.h[5], w10
+; CHECK-GI-NEXT: uxtb w10, w20
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: mov v16.h[5], w11
+; CHECK-GI-NEXT: uxtb w8, w14
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v6.h[6], w10
+; CHECK-GI-NEXT: uxtb w10, w19
+; CHECK-GI-NEXT: fmov w6, s23
+; CHECK-GI-NEXT: mov v5.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: mov v3.h[6], w18
+; CHECK-GI-NEXT: mov v4.h[6], w8
+; CHECK-GI-NEXT: uxtb w8, w7
+; CHECK-GI-NEXT: mov v7.h[6], w10
+; CHECK-GI-NEXT: mov v16.h[6], w11
+; CHECK-GI-NEXT: uxtb w10, w6
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v6.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w17
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[7], w13
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v4.h[7], w12
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[7], w10
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: umov w8, v1.b[8]
+; CHECK-GI-NEXT: umov w9, v2.b[8]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: mul w15, w8, w9
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: umov w16, v3.h[0]
+; CHECK-GI-NEXT: umov w18, v3.h[4]
+; CHECK-GI-NEXT: umov w17, v3.h[1]
+; CHECK-GI-NEXT: umov w1, v2.h[0]
+; CHECK-GI-NEXT: umov w3, v2.h[4]
+; CHECK-GI-NEXT: umov w0, v3.h[5]
+; CHECK-GI-NEXT: umov w5, v1.h[0]
+; CHECK-GI-NEXT: umov w7, v1.h[4]
+; CHECK-GI-NEXT: umov w2, v2.h[1]
+; CHECK-GI-NEXT: umov w4, v2.h[5]
+; CHECK-GI-NEXT: umov w6, v1.h[1]
+; CHECK-GI-NEXT: umov w19, v1.h[5]
+; CHECK-GI-NEXT: umov w10, v3.h[2]
+; CHECK-GI-NEXT: umov w8, v3.h[3]
+; CHECK-GI-NEXT: umov w11, v3.h[6]
+; CHECK-GI-NEXT: umov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w16
+; CHECK-GI-NEXT: fmov s4, w18
+; CHECK-GI-NEXT: fmov s5, w1
+; CHECK-GI-NEXT: fmov s6, w3
+; CHECK-GI-NEXT: fmov s7, w5
+; CHECK-GI-NEXT: fmov s16, w7
+; CHECK-GI-NEXT: fmov s17, w15
+; CHECK-GI-NEXT: umov w12, v2.h[2]
+; CHECK-GI-NEXT: umov w13, v2.h[6]
+; CHECK-GI-NEXT: umov w14, v1.h[2]
+; CHECK-GI-NEXT: umov w16, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mov v4.s[1], w0
+; CHECK-GI-NEXT: mov v5.s[1], w2
+; CHECK-GI-NEXT: mov v6.s[1], w4
+; CHECK-GI-NEXT: mov v7.s[1], w6
+; CHECK-GI-NEXT: mov v16.s[1], w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: umov w15, v2.h[3]
+; CHECK-GI-NEXT: umov w17, v2.h[7]
+; CHECK-GI-NEXT: umov w18, v1.h[3]
+; CHECK-GI-NEXT: umov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w16
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w15
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2580,11 +2809,14 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v25i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp x26, x25, [sp, #-64]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x22, x21, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
; CHECK-GI-NEXT: .cfi_offset w19, -8
; CHECK-GI-NEXT: .cfi_offset w20, -16
; CHECK-GI-NEXT: .cfi_offset w21, -24
@@ -2593,132 +2825,283 @@ define i32 @test_sdot_v25i8(ptr nocapture readonly %a, ptr nocapture readonly %b
; CHECK-GI-NEXT: .cfi_offset w24, -48
; CHECK-GI-NEXT: .cfi_offset w25, -56
; CHECK-GI-NEXT: .cfi_offset w26, -64
-; CHECK-GI-NEXT: ldp q1, q7, [x1]
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q2, q1, [x1]
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: ldp q16, q3, [x0]
-; CHECK-GI-NEXT: smov w9, v1.b[4]
-; CHECK-GI-NEXT: smov w11, v1.b[5]
-; CHECK-GI-NEXT: smov w18, v1.b[0]
-; CHECK-GI-NEXT: smov w0, v1.b[12]
-; CHECK-GI-NEXT: smov w3, v7.b[4]
-; CHECK-GI-NEXT: smov w12, v1.b[1]
-; CHECK-GI-NEXT: smov w13, v1.b[6]
-; CHECK-GI-NEXT: smov w1, v1.b[13]
-; CHECK-GI-NEXT: smov w4, v7.b[5]
-; CHECK-GI-NEXT: smov w15, v1.b[2]
-; CHECK-GI-NEXT: smov w8, v1.b[3]
-; CHECK-GI-NEXT: smov w16, v1.b[7]
-; CHECK-GI-NEXT: fmov s2, w9
-; CHECK-GI-NEXT: smov w14, v1.b[8]
-; CHECK-GI-NEXT: smov w17, v1.b[9]
-; CHECK-GI-NEXT: smov w10, v1.b[10]
-; CHECK-GI-NEXT: smov w9, v1.b[11]
-; CHECK-GI-NEXT: smov w5, v1.b[14]
-; CHECK-GI-NEXT: smov w6, v7.b[0]
-; CHECK-GI-NEXT: fmov s4, w0
-; CHECK-GI-NEXT: fmov s5, w3
-; CHECK-GI-NEXT: mov v2.s[1], w11
-; CHECK-GI-NEXT: smov w11, v1.b[15]
-; CHECK-GI-NEXT: fmov s1, w18
-; CHECK-GI-NEXT: smov w7, v7.b[1]
-; CHECK-GI-NEXT: smov w18, v7.b[6]
-; CHECK-GI-NEXT: smov w21, v16.b[4]
-; CHECK-GI-NEXT: mov v4.s[1], w1
-; CHECK-GI-NEXT: mov v5.s[1], w4
-; CHECK-GI-NEXT: fmov s6, w14
-; CHECK-GI-NEXT: mov v1.s[1], w12
-; CHECK-GI-NEXT: smov w12, v7.b[3]
-; CHECK-GI-NEXT: smov w14, v7.b[7]
-; CHECK-GI-NEXT: mov v2.s[2], w13
-; CHECK-GI-NEXT: smov w13, v7.b[2]
-; CHECK-GI-NEXT: smov w0, v7.b[8]
-; CHECK-GI-NEXT: fmov s7, w6
-; CHECK-GI-NEXT: smov w23, v16.b[12]
-; CHECK-GI-NEXT: smov w25, v3.b[4]
-; CHECK-GI-NEXT: mov v6.s[1], w17
-; CHECK-GI-NEXT: mov v4.s[2], w5
-; CHECK-GI-NEXT: mov v5.s[2], w18
-; CHECK-GI-NEXT: mov v1.s[2], w15
-; CHECK-GI-NEXT: smov w6, v16.b[0]
-; CHECK-GI-NEXT: smov w3, v16.b[1]
-; CHECK-GI-NEXT: mov v2.s[3], w16
-; CHECK-GI-NEXT: mov v7.s[1], w7
-; CHECK-GI-NEXT: smov w16, v16.b[2]
-; CHECK-GI-NEXT: smov w15, v16.b[3]
-; CHECK-GI-NEXT: smov w22, v16.b[5]
-; CHECK-GI-NEXT: smov w5, v16.b[6]
-; CHECK-GI-NEXT: smov w18, v16.b[7]
-; CHECK-GI-NEXT: smov w19, v16.b[8]
-; CHECK-GI-NEXT: smov w7, v16.b[9]
-; CHECK-GI-NEXT: smov w24, v16.b[13]
-; CHECK-GI-NEXT: smov w1, v16.b[10]
-; CHECK-GI-NEXT: smov w17, v16.b[11]
-; CHECK-GI-NEXT: smov w20, v16.b[14]
-; CHECK-GI-NEXT: smov w4, v16.b[15]
-; CHECK-GI-NEXT: fmov s16, w21
-; CHECK-GI-NEXT: smov w21, v3.b[8]
-; CHECK-GI-NEXT: smov w26, v3.b[5]
-; CHECK-GI-NEXT: fmov s17, w23
-; CHECK-GI-NEXT: smov w23, v3.b[0]
-; CHECK-GI-NEXT: fmov s18, w25
-; CHECK-GI-NEXT: smov w25, v3.b[3]
-; CHECK-GI-NEXT: mov v16.s[1], w22
-; CHECK-GI-NEXT: smov w22, v3.b[1]
-; CHECK-GI-NEXT: fmov s19, w6
-; CHECK-GI-NEXT: mov v17.s[1], w24
-; CHECK-GI-NEXT: smov w24, v3.b[2]
-; CHECK-GI-NEXT: smov w6, v3.b[7]
-; CHECK-GI-NEXT: mul w0, w0, w21
-; CHECK-GI-NEXT: mov v18.s[1], w26
-; CHECK-GI-NEXT: smov w26, v3.b[6]
-; CHECK-GI-NEXT: fmov s3, w19
-; CHECK-GI-NEXT: fmov s20, w23
-; CHECK-GI-NEXT: mov v19.s[1], w3
-; CHECK-GI-NEXT: mov v16.s[2], w5
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b5, v2.b[2]
+; CHECK-GI-NEXT: mov b6, v2.b[3]
+; CHECK-GI-NEXT: mov b7, v2.b[4]
+; CHECK-GI-NEXT: mov b16, v2.b[5]
+; CHECK-GI-NEXT: mov b17, v2.b[6]
+; CHECK-GI-NEXT: mov b18, v2.b[7]
+; CHECK-GI-NEXT: mov b19, v2.b[8]
+; CHECK-GI-NEXT: mov b20, v2.b[9]
+; CHECK-GI-NEXT: mov b21, v2.b[15]
+; CHECK-GI-NEXT: mov b3, v2.b[1]
+; CHECK-GI-NEXT: fmov w19, s2
+; CHECK-GI-NEXT: mov b22, v1.b[6]
+; CHECK-GI-NEXT: fmov w6, s5
+; CHECK-GI-NEXT: mov b5, v2.b[10]
+; CHECK-GI-NEXT: fmov w14, s6
+; CHECK-GI-NEXT: mov b6, v2.b[11]
+; CHECK-GI-NEXT: fmov w2, s7
+; CHECK-GI-NEXT: stp s17, s18, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: mov b7, v2.b[12]
+; CHECK-GI-NEXT: fmov w11, s16
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: mov b16, v2.b[13]
+; CHECK-GI-NEXT: mov b18, v1.b[1]
+; CHECK-GI-NEXT: sxtb w6, w6
+; CHECK-GI-NEXT: mov b17, v2.b[14]
+; CHECK-GI-NEXT: ldp q4, q2, [x0]
+; CHECK-GI-NEXT: fmov w25, s19
+; CHECK-GI-NEXT: fmov w24, s20
+; CHECK-GI-NEXT: fmov w22, s5
+; CHECK-GI-NEXT: mov b5, v1.b[2]
+; CHECK-GI-NEXT: fmov w0, s6
+; CHECK-GI-NEXT: sxtb w14, w14
+; CHECK-GI-NEXT: mov b20, v1.b[3]
+; CHECK-GI-NEXT: fmov w16, s7
+; CHECK-GI-NEXT: mov b7, v1.b[4]
+; CHECK-GI-NEXT: fmov w15, s16
+; CHECK-GI-NEXT: sxtb w25, w25
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov b16, v1.b[5]
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: sxtb w22, w22
+; CHECK-GI-NEXT: mov b6, v4.b[2]
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w0, w0
+; CHECK-GI-NEXT: mov b21, v1.b[7]
+; CHECK-GI-NEXT: mov b18, v4.b[4]
+; CHECK-GI-NEXT: fmov w7, s3
+; CHECK-GI-NEXT: mov b3, v4.b[1]
+; CHECK-GI-NEXT: fmov w12, s17
+; CHECK-GI-NEXT: fmov w5, s5
+; CHECK-GI-NEXT: mov b19, v4.b[3]
+; CHECK-GI-NEXT: fmov w4, s20
+; CHECK-GI-NEXT: fmov w3, s7
+; CHECK-GI-NEXT: sxtb w29, w7
+; CHECK-GI-NEXT: mov b17, v4.b[5]
+; CHECK-GI-NEXT: fmov w1, s16
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov b16, v4.b[6]
+; CHECK-GI-NEXT: fmov w18, s22
+; CHECK-GI-NEXT: mov b7, v4.b[7]
+; CHECK-GI-NEXT: fmov w17, s21
+; CHECK-GI-NEXT: mov b5, v4.b[8]
+; CHECK-GI-NEXT: mov b20, v4.b[9]
+; CHECK-GI-NEXT: fmov w27, s6
+; CHECK-GI-NEXT: mov b6, v4.b[10]
+; CHECK-GI-NEXT: mov b21, v4.b[11]
+; CHECK-GI-NEXT: fmov w21, s18
+; CHECK-GI-NEXT: mov b18, v4.b[12]
+; CHECK-GI-NEXT: mov b22, v4.b[13]
+; CHECK-GI-NEXT: mov b23, v4.b[14]
+; CHECK-GI-NEXT: fmov w10, s4
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v4.b[15]
+; CHECK-GI-NEXT: fmov s4, w25
+; CHECK-GI-NEXT: fmov w30, s3
+; CHECK-GI-NEXT: fmov s3, w28
+; CHECK-GI-NEXT: fmov w9, s5
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov w7, s7
+; CHECK-GI-NEXT: mov b7, v2.b[1]
+; CHECK-GI-NEXT: mov v4.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s1
+; CHECK-GI-NEXT: fmov w8, s20
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v3.h[1], w29
+; CHECK-GI-NEXT: fmov w29, s6
+; CHECK-GI-NEXT: fmov s6, w10
+; CHECK-GI-NEXT: fmov w10, s2
+; CHECK-GI-NEXT: fmov w19, s16
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b16, v2.b[3]
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b19, v2.b[2]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: fmov s5, w24
+; CHECK-GI-NEXT: sxtb w24, w30
+; CHECK-GI-NEXT: mov v3.h[2], w6
+; CHECK-GI-NEXT: sxtb w6, w26
+; CHECK-GI-NEXT: fmov w28, s21
+; CHECK-GI-NEXT: sxtb w23, w23
+; CHECK-GI-NEXT: mov v6.h[1], w24
+; CHECK-GI-NEXT: fmov w24, s7
+; CHECK-GI-NEXT: fmov s7, w9
+; CHECK-GI-NEXT: fmov w9, s19
+; CHECK-GI-NEXT: mov v5.h[1], w6
+; CHECK-GI-NEXT: mov v4.h[2], w22
+; CHECK-GI-NEXT: fmov w20, s17
+; CHECK-GI-NEXT: mov b17, v2.b[4]
+; CHECK-GI-NEXT: sxtb w24, w24
+; CHECK-GI-NEXT: mov v3.h[3], w14
+; CHECK-GI-NEXT: sxtb w14, w2
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v7.h[1], w8
+; CHECK-GI-NEXT: fmov w8, s16
+; CHECK-GI-NEXT: fmov s16, w10
+; CHECK-GI-NEXT: mov v6.h[2], w27
+; CHECK-GI-NEXT: mov v5.h[2], w5
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: mov v4.h[3], w0
+; CHECK-GI-NEXT: sxtb w0, w4
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov b18, v2.b[5]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: mov v16.h[1], w24
+; CHECK-GI-NEXT: mov v7.h[2], w29
+; CHECK-GI-NEXT: mov v3.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w25
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v6.h[3], w23
+; CHECK-GI-NEXT: mov v5.h[3], w0
+; CHECK-GI-NEXT: fmov w26, s22
+; CHECK-GI-NEXT: mov b19, v2.b[6]
+; CHECK-GI-NEXT: fmov w27, s18
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w28
+; CHECK-GI-NEXT: fmov w22, s23
+; CHECK-GI-NEXT: mov b17, v2.b[7]
+; CHECK-GI-NEXT: fmov w6, s24
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w0
-; CHECK-GI-NEXT: mov v17.s[2], w20
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v3.s[1], w7
-; CHECK-GI-NEXT: mov v20.s[1], w22
-; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[2], w26
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v16.s[3], w18
-; CHECK-GI-NEXT: mov v17.s[3], w4
-; CHECK-GI-NEXT: mov v7.s[2], w13
-; CHECK-GI-NEXT: mov v5.s[3], w14
-; CHECK-GI-NEXT: mov v19.s[2], w16
-; CHECK-GI-NEXT: mov v3.s[2], w1
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w11
+; CHECK-GI-NEXT: sxtb w11, w21
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[3], w8
+; CHECK-GI-NEXT: sxtb w8, w16
+; CHECK-GI-NEXT: sxtb w16, w3
+; CHECK-GI-NEXT: mov v6.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v3.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w15
+; CHECK-GI-NEXT: sxtb w15, w27
+; CHECK-GI-NEXT: mov v7.h[4], w14
+; CHECK-GI-NEXT: sxtb w14, w1
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.h[4], w16
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v6.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v7.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w12
+; CHECK-GI-NEXT: sxtb w12, w18
+; CHECK-GI-NEXT: mov v4.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w19
+; CHECK-GI-NEXT: mov v5.h[5], w14
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v16.h[5], w15
+; CHECK-GI-NEXT: mov v3.h[6], w11
+; CHECK-GI-NEXT: sxtb w11, w22
+; CHECK-GI-NEXT: mov v6.h[6], w9
+; CHECK-GI-NEXT: sxtb w9, w13
+; CHECK-GI-NEXT: sxtb w13, w24
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v20.s[2], w24
-; CHECK-GI-NEXT: ldp x24, x23, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v18.s[3], w6
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mul v2.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v17.4s
-; CHECK-GI-NEXT: mov v1.s[3], w8
-; CHECK-GI-NEXT: mov v6.s[3], w9
-; CHECK-GI-NEXT: mov v7.s[3], w12
-; CHECK-GI-NEXT: mov v19.s[3], w15
-; CHECK-GI-NEXT: mov v3.s[3], w17
-; CHECK-GI-NEXT: mov v20.s[3], w25
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: fmov w11, s17
+; CHECK-GI-NEXT: mov v4.h[6], w10
+; CHECK-GI-NEXT: sxtb w10, w7
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: mov v16.h[6], w13
+; CHECK-GI-NEXT: mov v3.h[7], w8
+; CHECK-GI-NEXT: sxtb w8, w6
+; CHECK-GI-NEXT: smov w12, v1.b[8]
+; CHECK-GI-NEXT: mov v6.h[7], w10
+; CHECK-GI-NEXT: sxtb w10, w17
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov v4.h[7], w9
+; CHECK-GI-NEXT: mov v7.h[7], w8
+; CHECK-GI-NEXT: smov w8, v2.b[8]
+; CHECK-GI-NEXT: mov v5.h[7], w10
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v16.h[7], w11
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v18.4s
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: mla v2.4s, v1.4s, v19.4s
-; CHECK-GI-NEXT: mla v4.4s, v6.4s, v3.4s
-; CHECK-GI-NEXT: mla v5.4s, v7.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
-; CHECK-GI-NEXT: add v1.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-GI-NEXT: mul v3.8h, v3.8h, v6.8h
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v2.8h, v4.8h, v7.8h
+; CHECK-GI-NEXT: mul w16, w12, w8
+; CHECK-GI-NEXT: mul v1.8h, v5.8h, v16.8h
+; CHECK-GI-NEXT: smov w17, v3.h[0]
+; CHECK-GI-NEXT: smov w0, v3.h[4]
+; CHECK-GI-NEXT: sxth w16, w16
+; CHECK-GI-NEXT: smov w2, v2.h[0]
+; CHECK-GI-NEXT: smov w4, v2.h[4]
+; CHECK-GI-NEXT: smov w18, v3.h[1]
+; CHECK-GI-NEXT: smov w1, v3.h[5]
+; CHECK-GI-NEXT: smov w3, v2.h[1]
+; CHECK-GI-NEXT: smov w5, v2.h[5]
+; CHECK-GI-NEXT: smov w6, v1.h[0]
+; CHECK-GI-NEXT: smov w19, v1.h[4]
+; CHECK-GI-NEXT: smov w7, v1.h[1]
+; CHECK-GI-NEXT: smov w20, v1.h[5]
+; CHECK-GI-NEXT: smov w10, v3.h[2]
+; CHECK-GI-NEXT: smov w8, v3.h[3]
+; CHECK-GI-NEXT: smov w11, v3.h[6]
+; CHECK-GI-NEXT: smov w9, v3.h[7]
+; CHECK-GI-NEXT: fmov s3, w17
+; CHECK-GI-NEXT: fmov s4, w0
+; CHECK-GI-NEXT: fmov s5, w2
+; CHECK-GI-NEXT: fmov s6, w4
+; CHECK-GI-NEXT: fmov s7, w6
+; CHECK-GI-NEXT: fmov s16, w19
+; CHECK-GI-NEXT: fmov s17, w16
+; CHECK-GI-NEXT: smov w12, v2.h[2]
+; CHECK-GI-NEXT: smov w13, v2.h[6]
+; CHECK-GI-NEXT: smov w14, v1.h[2]
+; CHECK-GI-NEXT: smov w15, v1.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w18
+; CHECK-GI-NEXT: mov v4.s[1], w1
+; CHECK-GI-NEXT: mov v5.s[1], w3
+; CHECK-GI-NEXT: mov v6.s[1], w5
+; CHECK-GI-NEXT: mov v7.s[1], w7
+; CHECK-GI-NEXT: mov v16.s[1], w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[1], wzr
+; CHECK-GI-NEXT: smov w16, v2.h[3]
+; CHECK-GI-NEXT: smov w17, v2.h[7]
+; CHECK-GI-NEXT: smov w18, v1.h[3]
+; CHECK-GI-NEXT: smov w0, v1.h[7]
+; CHECK-GI-NEXT: mov v3.s[2], w10
+; CHECK-GI-NEXT: mov v4.s[2], w11
+; CHECK-GI-NEXT: mov v5.s[2], w12
+; CHECK-GI-NEXT: mov v6.s[2], w13
+; CHECK-GI-NEXT: mov v7.s[2], w14
+; CHECK-GI-NEXT: mov v16.s[2], w15
+; CHECK-GI-NEXT: mov v17.s[2], wzr
+; CHECK-GI-NEXT: mov v3.s[3], w8
+; CHECK-GI-NEXT: mov v4.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v5.s[3], w16
+; CHECK-GI-NEXT: mov v6.s[3], w17
+; CHECK-GI-NEXT: mov v7.s[3], w18
+; CHECK-GI-NEXT: mov v16.s[3], w0
+; CHECK-GI-NEXT: mov v17.s[3], wzr
+; CHECK-GI-NEXT: add v1.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v5.4s, v6.4s
+; CHECK-GI-NEXT: add v3.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v0.4s, v17.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldp x26, x25, [sp], #64 // 16-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <25 x i8>, ptr %a
@@ -2948,349 +3331,535 @@ define i32 @test_sdot_v25i8_double(<25 x i8> %a, <25 x i8> %b, <25 x i8> %c, <25
;
; CHECK-GI-LABEL: test_sdot_v25i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: stp d11, d10, [sp, #-48]! // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #32] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 48
+; CHECK-GI-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
; CHECK-GI-NEXT: .cfi_offset w29, -16
-; CHECK-GI-NEXT: .cfi_offset b8, -24
-; CHECK-GI-NEXT: .cfi_offset b9, -32
-; CHECK-GI-NEXT: .cfi_offset b10, -40
-; CHECK-GI-NEXT: .cfi_offset b11, -48
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w10, w4
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w11, w2
-; CHECK-GI-NEXT: sxtb w13, w6
-; CHECK-GI-NEXT: ldr w12, [sp, #72]
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #16]
+; CHECK-GI-NEXT: lsl w10, w1, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #24]
+; CHECK-GI-NEXT: lsl w12, w4, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #56]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #64]
; CHECK-GI-NEXT: fmov s2, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #48]
-; CHECK-GI-NEXT: fmov s4, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #80]
-; CHECK-GI-NEXT: ldr w14, [sp, #128]
-; CHECK-GI-NEXT: ldr w15, [sp, #152]
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #112]
+; CHECK-GI-NEXT: mov v2.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #32]
+; CHECK-GI-NEXT: sbfx w9, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v4.h[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #152]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: mov v2.s[1], w9
-; CHECK-GI-NEXT: sxtb w9, w5
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s3, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #88]
-; CHECK-GI-NEXT: ldr x29, [sp, #32] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v4.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #56]
-; CHECK-GI-NEXT: fmov s5, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w3
+; CHECK-GI-NEXT: mov v2.h[2], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #40]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mov v2.s[2], w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #64]
-; CHECK-GI-NEXT: mov v5.s[1], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #104]
+; CHECK-GI-NEXT: mov v4.h[2], w10
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #160]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #96]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v4.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #120]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v2.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #112]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v2.h[3], w11
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #48]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s3, w8
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v4.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #80]
+; CHECK-GI-NEXT: lsl w8, w11, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #168]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], w11
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: mov v5.s[2], w9
-; CHECK-GI-NEXT: sxtb w9, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #144]
-; CHECK-GI-NEXT: ldr w10, [sp, #136]
-; CHECK-GI-NEXT: fmov s6, w11
-; CHECK-GI-NEXT: sxtb w11, w7
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v2.h[4], w12
+; CHECK-GI-NEXT: lsl w12, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v3.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #88]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[4], w8
+; CHECK-GI-NEXT: lsl w8, w10, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #176]
+; CHECK-GI-NEXT: mov v2.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: fmov s6, w9
+; CHECK-GI-NEXT: sbfx w15, w8, #8, #8
+; CHECK-GI-NEXT: lsl w9, w10, #8
+; CHECK-GI-NEXT: mov v3.h[2], w11
+; CHECK-GI-NEXT: sbfx w11, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #96]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v4.h[5], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #224]
+; CHECK-GI-NEXT: mov v6.h[1], w15
+; CHECK-GI-NEXT: mov v2.h[6], w11
+; CHECK-GI-NEXT: lsl w15, w7, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w12, [sp, #104]
+; CHECK-GI-NEXT: mov v3.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #216]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v2.h[7], w15
+; CHECK-GI-NEXT: lsl w15, w9, #8
+; CHECK-GI-NEXT: mov v4.h[6], w14
+; CHECK-GI-NEXT: mov v6.h[2], w10
+; CHECK-GI-NEXT: lsl w10, w13, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w15, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #288]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v3.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #192]
+; CHECK-GI-NEXT: fmov s5, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #232]
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v6.h[3], w12
+; CHECK-GI-NEXT: ldr w8, [sp, #72]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #184]
-; CHECK-GI-NEXT: mov v4.s[3], w11
-; CHECK-GI-NEXT: mov v6.s[1], w9
-; CHECK-GI-NEXT: fmov s7, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w11, [sp, #160]
-; CHECK-GI-NEXT: mov v7.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #224]
-; CHECK-GI-NEXT: mov v3.s[3], w9
-; CHECK-GI-NEXT: mov v6.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #192]
-; CHECK-GI-NEXT: fmov s16, w8
-; CHECK-GI-NEXT: fmov s18, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #168]
-; CHECK-GI-NEXT: ldr w13, [sp, #208]
-; CHECK-GI-NEXT: mov v7.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #256]
-; CHECK-GI-NEXT: ldr w8, [sp, #176]
-; CHECK-GI-NEXT: mov v16.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #200]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #232]
-; CHECK-GI-NEXT: mov v6.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #248]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #240]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v18.s[2], w14
-; CHECK-GI-NEXT: fmov s17, w10
+; CHECK-GI-NEXT: mov v5.h[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #280]
+; CHECK-GI-NEXT: sbfx w15, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #240]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v3.h[5], w15
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #296]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #200]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v6.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #304]
+; CHECK-GI-NEXT: ldr w11, [sp, #128]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v5.h[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #208]
+; CHECK-GI-NEXT: mov v7.h[1], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #248]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v6.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v3.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w14, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v7.h[2], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #256]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #320]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v5.h[4], w13
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #312]
+; CHECK-GI-NEXT: mov v3.h[7], w10
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v4.h[7], w8
+; CHECK-GI-NEXT: mov v7.h[3], w14
; CHECK-GI-NEXT: ldr w14, [sp, #264]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #288]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w15, [sp, #392]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #280]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #312]
-; CHECK-GI-NEXT: mov v16.s[3], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #296]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w9, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #136]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v5.h[5], w12
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mul v16.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v6.h[6], w11
+; CHECK-GI-NEXT: mov v7.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #328]
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: mov v5.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #336]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: smov w9, v16.h[0]
+; CHECK-GI-NEXT: smov w15, v16.h[4]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: smov w17, v16.h[5]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
; CHECK-GI-NEXT: mov v0.s[3], wzr
-; CHECK-GI-NEXT: mov v17.s[2], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #320]
-; CHECK-GI-NEXT: fmov s20, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #344]
-; CHECK-GI-NEXT: fmov s19, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w12, [sp, #304]
-; CHECK-GI-NEXT: mul v4.4s, v4.4s, v18.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
-; CHECK-GI-NEXT: mov v19.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #328]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s21, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #336]
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v16.h[1]
+; CHECK-GI-NEXT: mov v7.h[5], w14
+; CHECK-GI-NEXT: mov v5.h[7], w8
+; CHECK-GI-NEXT: ldr w14, [sp, #344]
+; CHECK-GI-NEXT: ldr w8, [sp, #352]
+; CHECK-GI-NEXT: fmov s2, w9
+; CHECK-GI-NEXT: fmov s3, w15
+; CHECK-GI-NEXT: lsl w9, w12, #8
+; CHECK-GI-NEXT: sbfx w12, w16, #8, #8
; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v17.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #376]
-; CHECK-GI-NEXT: mov v20.s[2], w13
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w13, [sp, #368]
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v19.s[2], w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w14, [sp, #384]
-; CHECK-GI-NEXT: mla v4.4s, v2.4s, v16.4s
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v20.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: mul w10, w8, w10
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: mov v19.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #416]
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: ldr w9, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #432]
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #448]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v21.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #424]
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #480]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v23.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #456]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s24, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #440]
-; CHECK-GI-NEXT: mov v25.s[1], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v7.h[6], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: smov w15, v16.h[3]
+; CHECK-GI-NEXT: mov v2.s[1], w13
+; CHECK-GI-NEXT: smov w13, v16.h[2]
+; CHECK-GI-NEXT: mov v6.h[7], w12
+; CHECK-GI-NEXT: smov w12, v16.h[6]
+; CHECK-GI-NEXT: mov v3.s[1], w17
+; CHECK-GI-NEXT: mul v18.8h, v4.8h, v5.8h
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w9, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #368]
+; CHECK-GI-NEXT: mov v2.s[2], w13
+; CHECK-GI-NEXT: smov w13, v16.h[7]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v3.s[2], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: mul w8, w10, w14
+; CHECK-GI-NEXT: smov w10, v18.h[0]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #376]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w12, v18.h[1]
+; CHECK-GI-NEXT: mov v7.h[7], w16
+; CHECK-GI-NEXT: mov v2.s[3], w15
+; CHECK-GI-NEXT: smov w15, v18.h[4]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v3.s[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #416]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s4, w10
+; CHECK-GI-NEXT: mov v16.h[1], w11
+; CHECK-GI-NEXT: ldr w10, [sp, #424]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #384]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: fmov s5, w15
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #432]
+; CHECK-GI-NEXT: mov v4.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[5]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v7.8h, v6.8h, v7.8h
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #392]
+; CHECK-GI-NEXT: ldr w13, [sp, #400]
+; CHECK-GI-NEXT: mov v5.s[1], w12
+; CHECK-GI-NEXT: smov w12, v18.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: mov v16.h[3], w14
+; CHECK-GI-NEXT: ldr w10, [sp, #440]
+; CHECK-GI-NEXT: smov w14, v18.h[6]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #456]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sxth w8, w8
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v4.s[2], w12
+; CHECK-GI-NEXT: smov w12, v18.h[3]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v17.h[2], w15
+; CHECK-GI-NEXT: mov v16.h[4], w11
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v5.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[7]
+; CHECK-GI-NEXT: ldr w15, [sp, #448]
+; CHECK-GI-NEXT: ldr w11, [sp, #408]
+; CHECK-GI-NEXT: mov v4.s[3], w12
+; CHECK-GI-NEXT: smov w12, v7.h[0]
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #480]
+; CHECK-GI-NEXT: mov v16.h[5], w9
+; CHECK-GI-NEXT: lsl w9, w13, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: mov v5.s[3], w14
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: smov w14, v7.h[1]
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: fmov s6, w12
; CHECK-GI-NEXT: ldr w12, [sp, #488]
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: ldr w15, [sp, #504]
-; CHECK-GI-NEXT: ldr w11, [sp, #472]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v24.s[1], w14
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v17.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #496]
+; CHECK-GI-NEXT: fmov s18, w10
+; CHECK-GI-NEXT: ldr w10, [sp, #552]
+; CHECK-GI-NEXT: mov v6.s[1], w14
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
; CHECK-GI-NEXT: ldr w14, [sp, #464]
-; CHECK-GI-NEXT: mov v23.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w8, [sp, #408]
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #496]
-; CHECK-GI-NEXT: mov v25.s[2], w10
+; CHECK-GI-NEXT: mov v16.h[6], w9
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v18.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v17.h[5], w15
+; CHECK-GI-NEXT: sbfx w15, w10, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
; CHECK-GI-NEXT: ldr w10, [sp, #512]
-; CHECK-GI-NEXT: sxtb w9, w14
-; CHECK-GI-NEXT: ldr w14, [sp, #520]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v22.s[1], wzr
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #528]
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w13, w15
-; CHECK-GI-NEXT: fmov s27, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w15, [sp, #552]
-; CHECK-GI-NEXT: mov v25.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #544]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v24.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #560]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s28, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #616]
-; CHECK-GI-NEXT: mov v27.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #592]
-; CHECK-GI-NEXT: ldr w15, [sp, #568]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w8, [sp, #536]
-; CHECK-GI-NEXT: ldr w10, [sp, #576]
-; CHECK-GI-NEXT: mov v28.s[1], w14
-; CHECK-GI-NEXT: ldr w14, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s30, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #600]
-; CHECK-GI-NEXT: mov v27.s[2], w9
-; CHECK-GI-NEXT: mov v29.s[1], w13
-; CHECK-GI-NEXT: sxtb w13, w14
-; CHECK-GI-NEXT: sxtb w14, w15
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w9, [sp, #608]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #632]
-; CHECK-GI-NEXT: mov v28.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #640]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v29.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #664]
-; CHECK-GI-NEXT: mov v30.s[2], w13
-; CHECK-GI-NEXT: mov v28.s[3], w14
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: ldr w15, [sp, #616]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v16.h[7], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #504]
+; CHECK-GI-NEXT: mov v18.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v17.h[6], w14
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #576]
+; CHECK-GI-NEXT: mov v19.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #624]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: fmov s20, w14
; CHECK-GI-NEXT: ldr w14, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #656]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: mov v29.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #688]
-; CHECK-GI-NEXT: fmov s31, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: mov v30.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s8, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w14, [sp, #712]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[1], w13
+; CHECK-GI-NEXT: mov v18.h[3], w16
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v19.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v20.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #688]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v18.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #584]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: fmov s21, w14
+; CHECK-GI-NEXT: ldr w15, [sp, #640]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v20.h[2], w13
; CHECK-GI-NEXT: ldr w13, [sp, #696]
-; CHECK-GI-NEXT: mov v8.s[1], w9
-; CHECK-GI-NEXT: sxtb w14, w14
-; CHECK-GI-NEXT: ldr w9, [sp, #720]
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #776]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s10, w14
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v22.s[2], wzr
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #704]
-; CHECK-GI-NEXT: mov v9.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #760]
-; CHECK-GI-NEXT: mov v8.s[2], w13
-; CHECK-GI-NEXT: mul w10, w10, w11
-; CHECK-GI-NEXT: mov v10.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #728]
-; CHECK-GI-NEXT: sxtb w11, w12
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v5.4s, v5.4s, v20.4s
-; CHECK-GI-NEXT: mul v7.4s, v7.4s, v21.4s
-; CHECK-GI-NEXT: mul v18.4s, v25.4s, v30.4s
-; CHECK-GI-NEXT: mov v22.s[3], wzr
-; CHECK-GI-NEXT: fmov s11, w10
-; CHECK-GI-NEXT: mov v9.s[2], w11
-; CHECK-GI-NEXT: ldr w10, [sp, #768]
-; CHECK-GI-NEXT: mov v8.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #672]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v21.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #592]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v19.h[4], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #704]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.h[3], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #648]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v21.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #600]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v19.h[5], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #656]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v21.h[3], w10
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #608]
+; CHECK-GI-NEXT: mov v20.h[4], w15
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #528]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: mov v19.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #720]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w16, w10, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v20.h[5], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #728]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v19.h[7], w16
+; CHECK-GI-NEXT: ldr w9, [sp, #472]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v18.h[6], w14
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: mov v21.h[5], w13
+; CHECK-GI-NEXT: ldr w15, [sp, #672]
+; CHECK-GI-NEXT: ldr w11, [sp, #536]
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: mov v20.h[6], w14
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mul v19.8h, v16.8h, v19.8h
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v21.h[6], w12
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v7.h[2]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w12, v7.h[4]
+; CHECK-GI-NEXT: mov v17.h[7], w9
+; CHECK-GI-NEXT: mov v20.h[7], w15
+; CHECK-GI-NEXT: smov w9, v7.h[5]
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w11, v19.h[4]
+; CHECK-GI-NEXT: ldr w15, [sp, #744]
+; CHECK-GI-NEXT: mov v21.h[7], w13
+; CHECK-GI-NEXT: mov v6.s[2], w14
+; CHECK-GI-NEXT: smov w14, v19.h[0]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: smov w13, v19.h[5]
+; CHECK-GI-NEXT: smov w12, v19.h[1]
+; CHECK-GI-NEXT: mul v20.8h, v17.8h, v20.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #544]
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: mul v22.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: fmov s18, w11
+; CHECK-GI-NEXT: mov v16.s[1], w9
+; CHECK-GI-NEXT: fmov s17, w14
+; CHECK-GI-NEXT: smov w14, v7.h[6]
+; CHECK-GI-NEXT: smov w11, v19.h[2]
+; CHECK-GI-NEXT: smov w9, v7.h[3]
; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mla v5.4s, v3.4s, v17.4s
-; CHECK-GI-NEXT: mov v11.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: mov v9.s[3], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mla v7.4s, v6.4s, v19.4s
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mul v20.4s, v26.4s, v8.4s
-; CHECK-GI-NEXT: mla v18.4s, v23.4s, v29.4s
-; CHECK-GI-NEXT: mov v31.s[3], w9
-; CHECK-GI-NEXT: add v1.4s, v22.4s, v1.4s
-; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
-; CHECK-GI-NEXT: mov v11.s[2], wzr
-; CHECK-GI-NEXT: mov v10.s[3], w8
-; CHECK-GI-NEXT: mul v21.4s, v28.4s, v9.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v7.4s, v1.4s
-; CHECK-GI-NEXT: mla v20.4s, v24.4s, v31.4s
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: mla v21.4s, v27.4s, v10.4s
+; CHECK-GI-NEXT: fmov s21, w8
+; CHECK-GI-NEXT: mov v18.s[1], w13
+; CHECK-GI-NEXT: sxtb w13, w15
+; CHECK-GI-NEXT: smov w15, v20.h[0]
+; CHECK-GI-NEXT: mov v17.s[1], w12
+; CHECK-GI-NEXT: smov w8, v7.h[7]
+; CHECK-GI-NEXT: smov w12, v19.h[6]
+; CHECK-GI-NEXT: mov v16.s[2], w14
+; CHECK-GI-NEXT: smov w14, v20.h[1]
+; CHECK-GI-NEXT: mul w10, w10, w13
+; CHECK-GI-NEXT: smov w13, v20.h[4]
+; CHECK-GI-NEXT: smov w16, v20.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
+; CHECK-GI-NEXT: fmov s7, w15
+; CHECK-GI-NEXT: smov w15, v20.h[2]
+; CHECK-GI-NEXT: mov v6.s[3], w9
+; CHECK-GI-NEXT: mov v17.s[2], w11
+; CHECK-GI-NEXT: smov w11, v22.h[0]
+; CHECK-GI-NEXT: sxth w10, w10
+; CHECK-GI-NEXT: mov v18.s[2], w12
+; CHECK-GI-NEXT: smov w12, v22.h[1]
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v7.s[1], w14
+; CHECK-GI-NEXT: smov w14, v22.h[4]
+; CHECK-GI-NEXT: fmov s23, w13
+; CHECK-GI-NEXT: smov w13, v22.h[5]
+; CHECK-GI-NEXT: fmov s26, w10
+; CHECK-GI-NEXT: smov w10, v19.h[7]
+; CHECK-GI-NEXT: fmov s24, w11
+; CHECK-GI-NEXT: smov w11, v20.h[6]
+; CHECK-GI-NEXT: mov v21.s[2], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w16
+; CHECK-GI-NEXT: add v4.4s, v6.4s, v16.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: fmov s25, w14
+; CHECK-GI-NEXT: smov w14, v22.h[2]
+; CHECK-GI-NEXT: mov v26.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], w12
+; CHECK-GI-NEXT: smov w12, v19.h[3]
+; CHECK-GI-NEXT: mov v7.s[2], w15
+; CHECK-GI-NEXT: smov w15, v20.h[3]
+; CHECK-GI-NEXT: mov v18.s[3], w10
+; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: mov v25.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[6]
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: smov w11, v20.h[7]
+; CHECK-GI-NEXT: mov v26.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], w14
+; CHECK-GI-NEXT: smov w14, v22.h[3]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: mov v7.s[3], w15
+; CHECK-GI-NEXT: add v1.4s, v21.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[2], w13
+; CHECK-GI-NEXT: smov w13, v22.h[7]
+; CHECK-GI-NEXT: mov v23.s[3], w11
+; CHECK-GI-NEXT: mov v26.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], w14
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v18.4s
+; CHECK-GI-NEXT: add v1.4s, v4.4s, v1.4s
+; CHECK-GI-NEXT: mov v25.s[3], w13
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v23.4s
+; CHECK-GI-NEXT: add v0.4s, v26.4s, v0.4s
; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
-; CHECK-GI-NEXT: add v3.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: add v0.4s, v11.4s, v0.4s
+; CHECK-GI-NEXT: add v7.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: add v3.4s, v5.4s, v6.4s
; CHECK-GI-NEXT: addv s1, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v21.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v7.4s, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: ldp d11, d10, [sp], #48 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <25 x i8> %a to <25 x i32>
@@ -3972,197 +4541,412 @@ define i32 @test_udot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_udot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s6, wzr
; CHECK-GI-NEXT: fmov s0, wzr
; CHECK-GI-NEXT: fmov s1, wzr
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: umov w8, v21.b[0]
-; CHECK-GI-NEXT: umov w9, v21.b[4]
-; CHECK-GI-NEXT: umov w10, v21.b[1]
-; CHECK-GI-NEXT: umov w13, v21.b[8]
-; CHECK-GI-NEXT: umov w11, v21.b[5]
-; CHECK-GI-NEXT: umov w14, v21.b[9]
-; CHECK-GI-NEXT: umov w15, v25.b[0]
-; CHECK-GI-NEXT: umov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: mov b29, v7.b[13]
+; CHECK-GI-NEXT: mov b30, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[7]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w13, s19
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b17, v16.b[2]
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w23, s21
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b18, v16.b[1]
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: fmov w5, s17
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b21, v16.b[5]
+; CHECK-GI-NEXT: fmov s17, w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: fmov w20, s22
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w16
+; CHECK-GI-NEXT: mov b19, v16.b[3]
+; CHECK-GI-NEXT: mov b22, v16.b[4]
+; CHECK-GI-NEXT: mov b20, v16.b[6]
+; CHECK-GI-NEXT: fmov w21, s27
+; CHECK-GI-NEXT: mov v17.h[1], w10
+; CHECK-GI-NEXT: fmov w24, s28
+; CHECK-GI-NEXT: mov b24, v16.b[8]
+; CHECK-GI-NEXT: fmov w22, s29
+; CHECK-GI-NEXT: mov b26, v16.b[9]
+; CHECK-GI-NEXT: fmov w4, s30
+; CHECK-GI-NEXT: uxtb w10, w21
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w13
+; CHECK-GI-NEXT: str s20, [sp] // 4-byte Folded Spill
+; CHECK-GI-NEXT: mov b25, v16.b[10]
+; CHECK-GI-NEXT: fmov w25, s18
+; CHECK-GI-NEXT: uxtb w22, w22
+; CHECK-GI-NEXT: mov b27, v16.b[11]
+; CHECK-GI-NEXT: mov b28, v16.b[12]
+; CHECK-GI-NEXT: mov b29, v16.b[13]
+; CHECK-GI-NEXT: mov b30, v16.b[14]
+; CHECK-GI-NEXT: fmov w26, s16
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w27
+; CHECK-GI-NEXT: mov b31, v16.b[15]
+; CHECK-GI-NEXT: ldp q18, q16, [x0]
+; CHECK-GI-NEXT: fmov w2, s21
+; CHECK-GI-NEXT: uxtb w26, w26
+; CHECK-GI-NEXT: mov v17.h[2], w9
+; CHECK-GI-NEXT: fmov w14, s22
+; CHECK-GI-NEXT: fmov w3, s25
+; CHECK-GI-NEXT: fmov w15, s19
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: uxtb w8, w23
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: mov b25, v18.b[5]
+; CHECK-GI-NEXT: mov b23, v18.b[6]
+; CHECK-GI-NEXT: uxtb w19, w19
+; CHECK-GI-NEXT: uxtb w3, w3
+; CHECK-GI-NEXT: mov v17.h[3], w10
+; CHECK-GI-NEXT: uxtb w10, w24
+; CHECK-GI-NEXT: uxtb w24, w7
+; CHECK-GI-NEXT: mov b19, v18.b[3]
+; CHECK-GI-NEXT: mov v7.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w20
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v18.b[10]
+; CHECK-GI-NEXT: fmov w9, s22
+; CHECK-GI-NEXT: fmov w6, s26
+; CHECK-GI-NEXT: mov v17.h[4], w10
+; CHECK-GI-NEXT: uxtb w10, w25
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b26, v18.b[4]
+; CHECK-GI-NEXT: fmov w18, s28
+; CHECK-GI-NEXT: fmov w16, s29
+; CHECK-GI-NEXT: mov v7.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s18
+; CHECK-GI-NEXT: mov b24, v18.b[7]
+; CHECK-GI-NEXT: fmov w30, s21
+; CHECK-GI-NEXT: mov b20, v18.b[8]
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: uxtb w16, w16
+; CHECK-GI-NEXT: mov b28, v18.b[11]
+; CHECK-GI-NEXT: mov b29, v18.b[12]
+; CHECK-GI-NEXT: fmov w23, s25
+; CHECK-GI-NEXT: mov b25, v18.b[13]
+; CHECK-GI-NEXT: fmov w21, s23
+; CHECK-GI-NEXT: mov v7.h[7], w24
+; CHECK-GI-NEXT: uxtb w24, w8
+; CHECK-GI-NEXT: uxtb w8, w9
+; CHECK-GI-NEXT: uxtb w9, w29
+; CHECK-GI-NEXT: mov b23, v18.b[14]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: fmov s21, w24
+; CHECK-GI-NEXT: fmov s18, w26
+; CHECK-GI-NEXT: fmov w28, s19
+; CHECK-GI-NEXT: mov b19, v16.b[1]
+; CHECK-GI-NEXT: mov v17.h[5], w22
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov v21.h[1], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov v18.h[1], w10
+; CHECK-GI-NEXT: uxtb w10, w5
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: fmov w24, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w4, s22
+; CHECK-GI-NEXT: mov b22, v16.b[8]
+; CHECK-GI-NEXT: mov v17.h[6], w8
+; CHECK-GI-NEXT: fmov w8, s19
+; CHECK-GI-NEXT: fmov s19, w19
+; CHECK-GI-NEXT: mov v21.h[2], w9
+; CHECK-GI-NEXT: uxtb w9, w28
+; CHECK-GI-NEXT: mov v18.h[2], w10
+; CHECK-GI-NEXT: uxtb w10, w6
+; CHECK-GI-NEXT: mov b27, v16.b[9]
+; CHECK-GI-NEXT: fmov w20, s24
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: mov b24, v16.b[3]
+; CHECK-GI-NEXT: fmov w5, s20
+; CHECK-GI-NEXT: mov v19.h[1], w10
+; CHECK-GI-NEXT: fmov w10, s23
+; CHECK-GI-NEXT: fmov s23, w7
+; CHECK-GI-NEXT: mov v21.h[3], w9
+; CHECK-GI-NEXT: uxtb w9, w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: uxtb w27, w30
+; CHECK-GI-NEXT: uxtb w5, w5
+; CHECK-GI-NEXT: fmov w7, s22
+; CHECK-GI-NEXT: uxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[1], w9
+; CHECK-GI-NEXT: fmov w9, s16
+; CHECK-GI-NEXT: mov b20, v16.b[10]
+; CHECK-GI-NEXT: fmov w22, s28
+; CHECK-GI-NEXT: fmov w25, s25
+; CHECK-GI-NEXT: uxtb w7, w7
+; CHECK-GI-NEXT: mov v21.h[4], w11
+; CHECK-GI-NEXT: fmov w11, s27
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov b25, v16.b[5]
+; CHECK-GI-NEXT: fmov w29, s24
+; CHECK-GI-NEXT: fmov s22, w7
+; CHECK-GI-NEXT: mov v23.h[2], w27
+; CHECK-GI-NEXT: mov b24, v16.b[11]
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: fmov w27, s20
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: fmov w26, s29
+; CHECK-GI-NEXT: mov b26, v16.b[4]
+; CHECK-GI-NEXT: mov v19.h[2], w3
+; CHECK-GI-NEXT: uxtb w3, w29
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[1], w11
+; CHECK-GI-NEXT: uxtb w11, w15
+; CHECK-GI-NEXT: uxtb w15, w22
+; CHECK-GI-NEXT: uxtb w22, w23
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: fmov w6, s25
+; CHECK-GI-NEXT: mov v18.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w27
+; CHECK-GI-NEXT: mov v23.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w17
+; CHECK-GI-NEXT: uxtb w17, w21
+; CHECK-GI-NEXT: mov b25, v16.b[12]
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v21.h[5], w22
+; CHECK-GI-NEXT: mov v22.h[2], w11
+; CHECK-GI-NEXT: uxtb w11, w14
+; CHECK-GI-NEXT: uxtb w14, w26
+; CHECK-GI-NEXT: mov v20.h[2], w5
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w19, s26
+; CHECK-GI-NEXT: mov v18.h[4], w11
+; CHECK-GI-NEXT: uxtb w11, w28
+; CHECK-GI-NEXT: mov v23.h[4], w14
+; CHECK-GI-NEXT: uxtb w14, w25
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b26, v16.b[13]
+; CHECK-GI-NEXT: fmov w7, s25
+; CHECK-GI-NEXT: mov v19.h[3], w15
+; CHECK-GI-NEXT: uxtb w15, w18
+; CHECK-GI-NEXT: uxtb w18, w19
+; CHECK-GI-NEXT: mov v21.h[6], w17
+; CHECK-GI-NEXT: uxtb w17, w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[3], w11
+; CHECK-GI-NEXT: uxtb w11, w2
+; CHECK-GI-NEXT: mov v20.h[3], w3
+; CHECK-GI-NEXT: mov v23.h[5], w14
+; CHECK-GI-NEXT: uxtb w14, w24
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[5], w11
+; CHECK-GI-NEXT: uxtb w11, w7
+; CHECK-GI-NEXT: fmov w8, s26
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: ldr w15, [sp] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v21.h[7], w17
+; CHECK-GI-NEXT: uxtb w17, w6
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: uxtb w8, w8
+; CHECK-GI-NEXT: uxtb w15, w15
+; CHECK-GI-NEXT: fmov w13, s30
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v20.h[4], w18
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v19.h[5], w16
+; CHECK-GI-NEXT: mov b27, v16.b[14]
+; CHECK-GI-NEXT: mul v24.8h, v7.8h, v21.8h
+; CHECK-GI-NEXT: mov v22.h[5], w8
+; CHECK-GI-NEXT: uxtb w8, w4
+; CHECK-GI-NEXT: mov b7, v16.b[7]
+; CHECK-GI-NEXT: mov b16, v16.b[15]
+; CHECK-GI-NEXT: fmov w12, s31
+; CHECK-GI-NEXT: mov v17.h[7], w11
+; CHECK-GI-NEXT: uxtb w11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v20.h[5], w17
+; CHECK-GI-NEXT: mov v23.h[7], w8
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov v18.h[6], w15
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: uxtb w13, w13
+; CHECK-GI-NEXT: mov v19.h[6], w11
+; CHECK-GI-NEXT: fmov w12, s16
+; CHECK-GI-NEXT: fmov w11, s7
; CHECK-GI-NEXT: fmov s4, wzr
+; CHECK-GI-NEXT: uxtb w9, w9
+; CHECK-GI-NEXT: mov v20.h[6], w10
+; CHECK-GI-NEXT: umov w10, v24.h[0]
+; CHECK-GI-NEXT: mul v21.8h, v17.8h, v23.8h
+; CHECK-GI-NEXT: mov v18.h[7], w13
; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: uxtb w11, w11
+; CHECK-GI-NEXT: mov v19.h[7], w8
+; CHECK-GI-NEXT: uxtb w8, w12
+; CHECK-GI-NEXT: umov w12, v24.h[4]
+; CHECK-GI-NEXT: mov v22.h[6], w9
+; CHECK-GI-NEXT: umov w9, v24.h[1]
+; CHECK-GI-NEXT: mov v20.h[7], w11
+; CHECK-GI-NEXT: umov w11, v24.h[5]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: ldrb w10, [x1, #32]
+; CHECK-GI-NEXT: umov w13, v21.h[0]
+; CHECK-GI-NEXT: umov w14, v21.h[1]
+; CHECK-GI-NEXT: umov w15, v21.h[4]
; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: umov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: umov w9, v21.b[3]
-; CHECK-GI-NEXT: umov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: umov w15, v25.b[4]
; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: umov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: umov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s16, w12
+; CHECK-GI-NEXT: mov v22.h[7], w8
+; CHECK-GI-NEXT: umov w12, v24.h[6]
+; CHECK-GI-NEXT: umov w8, v24.h[2]
+; CHECK-GI-NEXT: mov v7.s[1], w9
+; CHECK-GI-NEXT: ldrb w9, [x0, #32]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mul v23.8h, v18.8h, v20.8h
+; CHECK-GI-NEXT: umov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v16.s[1], w11
+; CHECK-GI-NEXT: umov w11, v21.h[5]
+; CHECK-GI-NEXT: fmov s18, w15
+; CHECK-GI-NEXT: mul v19.8h, v19.8h, v22.8h
+; CHECK-GI-NEXT: umov w15, v21.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: mov v17.s[1], w14
+; CHECK-GI-NEXT: umov w14, v21.h[2]
+; CHECK-GI-NEXT: mov v7.s[2], w8
+; CHECK-GI-NEXT: mul w8, w10, w9
+; CHECK-GI-NEXT: umov w9, v23.h[0]
+; CHECK-GI-NEXT: umov w10, v23.h[1]
+; CHECK-GI-NEXT: mov v16.s[2], w12
+; CHECK-GI-NEXT: umov w12, v21.h[3]
+; CHECK-GI-NEXT: mov v18.s[1], w11
+; CHECK-GI-NEXT: umov w11, v23.h[4]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: umov w15, v25.b[13]
+; CHECK-GI-NEXT: mov v17.s[2], w14
+; CHECK-GI-NEXT: umov w14, v23.h[5]
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: umov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: umov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: umov w8, v21.b[11]
-; CHECK-GI-NEXT: umov w10, v21.b[14]
+; CHECK-GI-NEXT: fmov s20, w9
+; CHECK-GI-NEXT: umov w9, v19.h[1]
; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: umov w11, v25.b[2]
+; CHECK-GI-NEXT: mov v16.s[3], w13
+; CHECK-GI-NEXT: umov w13, v19.h[0]
+; CHECK-GI-NEXT: mov v18.s[2], w15
+; CHECK-GI-NEXT: umov w15, v21.h[7]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: umov w11, v23.h[2]
+; CHECK-GI-NEXT: mov v17.s[3], w12
+; CHECK-GI-NEXT: umov w12, v19.h[4]
+; CHECK-GI-NEXT: mov v20.s[1], w10
+; CHECK-GI-NEXT: umov w10, v23.h[3]
; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: umov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: umov w14, v21.b[15]
-; CHECK-GI-NEXT: umov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: umov w13, v25.b[12]
+; CHECK-GI-NEXT: umov w16, v24.h[3]
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: umov w13, v19.h[5]
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: umov w14, v23.h[6]
+; CHECK-GI-NEXT: mov v18.s[3], w15
+; CHECK-GI-NEXT: umov w15, v19.h[2]
+; CHECK-GI-NEXT: mov v20.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[6]
; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: umov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: umov w8, v26.b[5]
-; CHECK-GI-NEXT: umov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v22.s[1], w9
+; CHECK-GI-NEXT: umov w9, v23.h[7]
+; CHECK-GI-NEXT: fmov s23, w12
+; CHECK-GI-NEXT: umov w12, v19.h[3]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: umov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: umov w9, v25.b[10]
-; CHECK-GI-NEXT: umov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: umov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: umov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: umov w12, v26.b[4]
-; CHECK-GI-NEXT: umov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: umov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: umov w13, v26.b[9]
+; CHECK-GI-NEXT: mov v21.s[2], w14
; CHECK-GI-NEXT: mov v2.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: umov w15, v25.b[15]
+; CHECK-GI-NEXT: mov v23.s[1], w13
; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: umov w11, v26.b[6]
-; CHECK-GI-NEXT: umov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: umov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: umov w10, v26.b[3]
; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: umov w15, v22.b[0]
-; CHECK-GI-NEXT: umov w8, v26.b[7]
+; CHECK-GI-NEXT: mov v22.s[2], w15
+; CHECK-GI-NEXT: mov v7.s[3], w16
+; CHECK-GI-NEXT: mov v20.s[3], w10
; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: umov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: umov w10, v22.b[1]
-; CHECK-GI-NEXT: umov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: umov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: umov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: umov w15, v22.b[12]
-; CHECK-GI-NEXT: umov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[13]
+; CHECK-GI-NEXT: mov v21.s[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: umov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: umov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: umov w15, v26.b[15]
-; CHECK-GI-NEXT: umov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: umov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrb w14, [x1, #32]
-; CHECK-GI-NEXT: umov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: umov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v23.s[2], w11
+; CHECK-GI-NEXT: umov w11, v19.h[7]
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: mov v22.s[3], w12
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: umov w9, v22.b[7]
-; CHECK-GI-NEXT: umov w11, v22.b[3]
; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: add v6.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v7.4s, v17.4s, v18.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: umov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: add v16.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: mov v23.s[3], w11
; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: umov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: add v17.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v0.4s, v19.4s, v0.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4359,197 +5143,412 @@ define i32 @test_sdot_v33i8(ptr nocapture readonly %a, ptr nocapture readonly %b
;
; CHECK-GI-LABEL: test_sdot_v33i8:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str d8, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset b8, -16
-; CHECK-GI-NEXT: ldp q21, q25, [x1]
-; CHECK-GI-NEXT: fmov s5, wzr
-; CHECK-GI-NEXT: ldp q26, q22, [x0]
-; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: sub sp, sp, #112
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x28, x27, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x26, x25, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x24, x23, [sp, #64] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x22, x21, [sp, #80] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #96] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 112
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w22, -32
+; CHECK-GI-NEXT: .cfi_offset w23, -40
+; CHECK-GI-NEXT: .cfi_offset w24, -48
+; CHECK-GI-NEXT: .cfi_offset w25, -56
+; CHECK-GI-NEXT: .cfi_offset w26, -64
+; CHECK-GI-NEXT: .cfi_offset w27, -72
+; CHECK-GI-NEXT: .cfi_offset w28, -80
+; CHECK-GI-NEXT: .cfi_offset w30, -88
+; CHECK-GI-NEXT: .cfi_offset w29, -96
+; CHECK-GI-NEXT: ldp q7, q16, [x1]
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: str w2, [sp, #12] // 4-byte Folded Spill
; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: smov w8, v21.b[0]
-; CHECK-GI-NEXT: smov w9, v21.b[4]
-; CHECK-GI-NEXT: smov w10, v21.b[1]
-; CHECK-GI-NEXT: smov w13, v21.b[8]
-; CHECK-GI-NEXT: smov w11, v21.b[5]
-; CHECK-GI-NEXT: smov w14, v21.b[9]
-; CHECK-GI-NEXT: smov w15, v25.b[0]
-; CHECK-GI-NEXT: smov w12, v21.b[2]
; CHECK-GI-NEXT: fmov s2, wzr
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
-; CHECK-GI-NEXT: mov v6.s[1], wzr
-; CHECK-GI-NEXT: fmov s7, w8
-; CHECK-GI-NEXT: fmov s17, w9
-; CHECK-GI-NEXT: smov w8, v21.b[6]
-; CHECK-GI-NEXT: fmov s16, w13
-; CHECK-GI-NEXT: smov w9, v21.b[3]
-; CHECK-GI-NEXT: smov w13, v21.b[7]
-; CHECK-GI-NEXT: fmov s18, w15
-; CHECK-GI-NEXT: smov w15, v25.b[4]
-; CHECK-GI-NEXT: mov v0.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], w10
-; CHECK-GI-NEXT: smov w10, v21.b[12]
-; CHECK-GI-NEXT: mov v17.s[1], w11
-; CHECK-GI-NEXT: smov w11, v21.b[13]
-; CHECK-GI-NEXT: mov v16.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[1]
+; CHECK-GI-NEXT: fmov s6, wzr
+; CHECK-GI-NEXT: mov b19, v7.b[3]
+; CHECK-GI-NEXT: mov b23, v7.b[7]
+; CHECK-GI-NEXT: mov b17, v7.b[1]
+; CHECK-GI-NEXT: fmov w11, s7
+; CHECK-GI-NEXT: mov b18, v7.b[2]
+; CHECK-GI-NEXT: mov b20, v7.b[4]
+; CHECK-GI-NEXT: mov b21, v7.b[5]
+; CHECK-GI-NEXT: mov b22, v7.b[6]
+; CHECK-GI-NEXT: mov b24, v7.b[8]
+; CHECK-GI-NEXT: mov b25, v7.b[9]
+; CHECK-GI-NEXT: mov b26, v7.b[10]
+; CHECK-GI-NEXT: mov b27, v7.b[11]
+; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: mov b28, v7.b[12]
+; CHECK-GI-NEXT: fmov w14, s19
+; CHECK-GI-NEXT: mov b19, v7.b[13]
+; CHECK-GI-NEXT: mov b29, v7.b[14]
+; CHECK-GI-NEXT: mov b7, v7.b[15]
+; CHECK-GI-NEXT: fmov w7, s23
+; CHECK-GI-NEXT: mov b23, v16.b[6]
+; CHECK-GI-NEXT: fmov w10, s17
+; CHECK-GI-NEXT: fmov w9, s18
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b30, v16.b[1]
+; CHECK-GI-NEXT: fmov w16, s25
+; CHECK-GI-NEXT: fmov w12, s20
+; CHECK-GI-NEXT: fmov w24, s21
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w22, s22
+; CHECK-GI-NEXT: stp s23, s7, [sp, #4] // 8-byte Folded Spill
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov s7, w11
+; CHECK-GI-NEXT: mov b20, v16.b[2]
+; CHECK-GI-NEXT: mov b17, v16.b[3]
+; CHECK-GI-NEXT: mov b21, v16.b[4]
+; CHECK-GI-NEXT: mov b18, v16.b[5]
+; CHECK-GI-NEXT: fmov w27, s26
+; CHECK-GI-NEXT: fmov w25, s27
+; CHECK-GI-NEXT: mov b22, v16.b[7]
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov v7.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w16
+; CHECK-GI-NEXT: mov b25, v16.b[8]
+; CHECK-GI-NEXT: fmov w23, s19
+; CHECK-GI-NEXT: mov b24, v16.b[9]
+; CHECK-GI-NEXT: fmov w5, s29
+; CHECK-GI-NEXT: mov b26, v16.b[10]
+; CHECK-GI-NEXT: mov b19, v16.b[11]
+; CHECK-GI-NEXT: fmov w6, s30
+; CHECK-GI-NEXT: mov b27, v16.b[12]
+; CHECK-GI-NEXT: mov b28, v16.b[13]
+; CHECK-GI-NEXT: mov b29, v16.b[14]
+; CHECK-GI-NEXT: sxtb w30, w23
+; CHECK-GI-NEXT: sxtb w5, w5
+; CHECK-GI-NEXT: mov v7.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: fmov w20, s16
+; CHECK-GI-NEXT: mov b30, v16.b[15]
+; CHECK-GI-NEXT: fmov s16, w8
+; CHECK-GI-NEXT: sxtb w8, w12
+; CHECK-GI-NEXT: fmov w15, s17
+; CHECK-GI-NEXT: fmov w11, s18
+; CHECK-GI-NEXT: ldp q18, q17, [x0]
+; CHECK-GI-NEXT: mov v7.h[3], w9
+; CHECK-GI-NEXT: sxtb w9, w27
+; CHECK-GI-NEXT: fmov w18, s20
+; CHECK-GI-NEXT: sxtb w15, w15
+; CHECK-GI-NEXT: mov v16.h[1], w10
+; CHECK-GI-NEXT: sxtb w10, w25
+; CHECK-GI-NEXT: mov b20, v18.b[3]
+; CHECK-GI-NEXT: fmov w2, s22
+; CHECK-GI-NEXT: mov b22, v18.b[1]
+; CHECK-GI-NEXT: sxtb w18, w18
+; CHECK-GI-NEXT: fmov w13, s21
+; CHECK-GI-NEXT: mov b21, v18.b[2]
+; CHECK-GI-NEXT: mov v7.h[4], w8
+; CHECK-GI-NEXT: fmov w3, s19
+; CHECK-GI-NEXT: mov b19, v18.b[6]
+; CHECK-GI-NEXT: mov v16.h[2], w9
+; CHECK-GI-NEXT: sxtb w9, w24
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: sxtb w13, w13
+; CHECK-GI-NEXT: fmov w28, s20
+; CHECK-GI-NEXT: mov b20, v18.b[11]
+; CHECK-GI-NEXT: fmov w8, s22
+; CHECK-GI-NEXT: mov b25, v18.b[8]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov v7.h[5], w9
+; CHECK-GI-NEXT: sxtb w9, w22
+; CHECK-GI-NEXT: fmov w19, s24
+; CHECK-GI-NEXT: mov v16.h[3], w10
+; CHECK-GI-NEXT: sxtb w10, w26
+; CHECK-GI-NEXT: fmov w26, s18
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w29, w29
+; CHECK-GI-NEXT: mov b24, v18.b[4]
+; CHECK-GI-NEXT: mov b23, v18.b[5]
+; CHECK-GI-NEXT: fmov w17, s27
+; CHECK-GI-NEXT: mov b27, v18.b[9]
+; CHECK-GI-NEXT: sxtb w23, w26
+; CHECK-GI-NEXT: mov v7.h[6], w9
+; CHECK-GI-NEXT: fmov w24, s19
+; CHECK-GI-NEXT: mov v16.h[4], w10
+; CHECK-GI-NEXT: mov b19, v18.b[14]
+; CHECK-GI-NEXT: fmov w10, s25
+; CHECK-GI-NEXT: fmov w4, s26
+; CHECK-GI-NEXT: fmov w16, s28
+; CHECK-GI-NEXT: mov b26, v18.b[7]
+; CHECK-GI-NEXT: mov b28, v18.b[10]
+; CHECK-GI-NEXT: fmov w27, s24
+; CHECK-GI-NEXT: mov b24, v18.b[12]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v7.h[7], w7
+; CHECK-GI-NEXT: fmov w7, s20
+; CHECK-GI-NEXT: sxtb w4, w4
+; CHECK-GI-NEXT: fmov s20, w23
+; CHECK-GI-NEXT: fmov w25, s23
+; CHECK-GI-NEXT: mov b23, v18.b[13]
+; CHECK-GI-NEXT: mov b22, v18.b[15]
+; CHECK-GI-NEXT: mov v16.h[5], w30
+; CHECK-GI-NEXT: sxtb w7, w7
+; CHECK-GI-NEXT: fmov w9, s27
+; CHECK-GI-NEXT: mov b21, v17.b[1]
+; CHECK-GI-NEXT: mov v20.h[1], w8
+; CHECK-GI-NEXT: sxtb w8, w20
+; CHECK-GI-NEXT: sxtb w20, w6
+; CHECK-GI-NEXT: fmov w6, s19
+; CHECK-GI-NEXT: fmov w26, s28
+; CHECK-GI-NEXT: mov b28, v17.b[8]
+; CHECK-GI-NEXT: fmov s18, w8
+; CHECK-GI-NEXT: sxtb w8, w21
+; CHECK-GI-NEXT: mov v16.h[6], w5
+; CHECK-GI-NEXT: fmov w5, s22
+; CHECK-GI-NEXT: fmov s22, w10
+; CHECK-GI-NEXT: sxtb w10, w27
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: mov v20.h[2], w29
+; CHECK-GI-NEXT: fmov s19, w8
+; CHECK-GI-NEXT: sxtb w8, w28
+; CHECK-GI-NEXT: sxtb w28, w19
+; CHECK-GI-NEXT: sxtb w19, w9
+; CHECK-GI-NEXT: fmov w27, s17
+; CHECK-GI-NEXT: mov b25, v17.b[2]
+; CHECK-GI-NEXT: fmov w29, s21
+; CHECK-GI-NEXT: mov b21, v17.b[9]
+; CHECK-GI-NEXT: mov v22.h[1], w19
+; CHECK-GI-NEXT: fmov w23, s23
+; CHECK-GI-NEXT: mov v20.h[3], w8
+; CHECK-GI-NEXT: mov b23, v17.b[6]
+; CHECK-GI-NEXT: fmov w30, s24
+; CHECK-GI-NEXT: sxtb w27, w27
+; CHECK-GI-NEXT: mov b24, v17.b[5]
+; CHECK-GI-NEXT: mov v18.h[1], w20
+; CHECK-GI-NEXT: fmov w21, s25
+; CHECK-GI-NEXT: mov b25, v17.b[10]
+; CHECK-GI-NEXT: mov v19.h[1], w28
+; CHECK-GI-NEXT: sxtb w28, w29
+; CHECK-GI-NEXT: mov v22.h[2], w26
+; CHECK-GI-NEXT: fmov w26, s21
+; CHECK-GI-NEXT: mov v20.h[4], w10
+; CHECK-GI-NEXT: fmov w10, s28
+; CHECK-GI-NEXT: fmov s21, w27
+; CHECK-GI-NEXT: sxtb w21, w21
+; CHECK-GI-NEXT: mov b27, v17.b[3]
+; CHECK-GI-NEXT: fmov w19, s23
+; CHECK-GI-NEXT: sxtb w26, w26
+; CHECK-GI-NEXT: fmov w22, s26
+; CHECK-GI-NEXT: mov b26, v17.b[4]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v21.h[1], w28
+; CHECK-GI-NEXT: fmov w8, s24
+; CHECK-GI-NEXT: mov b24, v17.b[11]
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v18.h[2], w18
+; CHECK-GI-NEXT: sxtb w18, w25
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: fmov w20, s27
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: fmov w9, s26
+; CHECK-GI-NEXT: mov b26, v17.b[12]
+; CHECK-GI-NEXT: sxtb w25, w27
+; CHECK-GI-NEXT: mov v20.h[5], w18
+; CHECK-GI-NEXT: sxtb w18, w3
+; CHECK-GI-NEXT: sxtb w3, w24
+; CHECK-GI-NEXT: mov v23.h[1], w26
+; CHECK-GI-NEXT: mov v21.h[2], w21
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov w28, s24
+; CHECK-GI-NEXT: mov v22.h[3], w7
+; CHECK-GI-NEXT: sxtb w7, w20
+; CHECK-GI-NEXT: mov v19.h[2], w4
+; CHECK-GI-NEXT: sxtb w4, w30
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v18.h[3], w15
+; CHECK-GI-NEXT: sxtb w20, w28
+; CHECK-GI-NEXT: sxtb w15, w17
+; CHECK-GI-NEXT: sxtb w17, w22
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #80] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov v23.h[2], w25
+; CHECK-GI-NEXT: mov v20.h[6], w3
+; CHECK-GI-NEXT: mov v21.h[3], w7
+; CHECK-GI-NEXT: fmov w10, s26
+; CHECK-GI-NEXT: mov v22.h[4], w4
+; CHECK-GI-NEXT: mov v19.h[3], w18
+; CHECK-GI-NEXT: sxtb w18, w23
+; CHECK-GI-NEXT: ldp x24, x23, [sp, #64] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mov b27, v17.b[13]
+; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: mov v23.h[3], w20
+; CHECK-GI-NEXT: mov v18.h[4], w13
+; CHECK-GI-NEXT: sxtb w13, w6
+; CHECK-GI-NEXT: mov v20.h[7], w17
+; CHECK-GI-NEXT: mov v21.h[4], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #8] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v22.h[5], w18
+; CHECK-GI-NEXT: mov b25, v17.b[14]
+; CHECK-GI-NEXT: fmov w26, s27
+; CHECK-GI-NEXT: mov v19.h[4], w15
+; CHECK-GI-NEXT: fmov w14, s29
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v23.h[4], w10
+; CHECK-GI-NEXT: sxtb w10, w11
+; CHECK-GI-NEXT: sxtb w11, w16
+; CHECK-GI-NEXT: mov v21.h[5], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #4] // 4-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w15, w26
+; CHECK-GI-NEXT: ldp x26, x25, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v18.h[5], w10
+; CHECK-GI-NEXT: sxtb w10, w19
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #96] // 16-byte Folded Reload
+; CHECK-GI-NEXT: mul v20.8h, v7.8h, v20.8h
+; CHECK-GI-NEXT: mov b7, v17.b[7]
+; CHECK-GI-NEXT: mov v22.h[6], w13
+; CHECK-GI-NEXT: sxtb w13, w5
+; CHECK-GI-NEXT: fmov w27, s25
+; CHECK-GI-NEXT: mov v19.h[5], w11
+; CHECK-GI-NEXT: sxtb w11, w2
+; CHECK-GI-NEXT: mov b17, v17.b[15]
+; CHECK-GI-NEXT: mov v18.h[6], w8
+; CHECK-GI-NEXT: mov v16.h[7], w9
+; CHECK-GI-NEXT: sxtb w9, w14
+; CHECK-GI-NEXT: mov v23.h[5], w15
+; CHECK-GI-NEXT: mov v21.h[6], w10
+; CHECK-GI-NEXT: sxtb w14, w27
+; CHECK-GI-NEXT: ldp x28, x27, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: fmov w8, s7
+; CHECK-GI-NEXT: mov v22.h[7], w13
+; CHECK-GI-NEXT: fmov w12, s30
+; CHECK-GI-NEXT: mov v19.h[6], w9
+; CHECK-GI-NEXT: fmov w9, s17
+; CHECK-GI-NEXT: smov w10, v20.h[0]
+; CHECK-GI-NEXT: mov v23.h[6], w14
+; CHECK-GI-NEXT: mov v18.h[7], w11
+; CHECK-GI-NEXT: smov w13, v20.h[1]
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: smov w11, v20.h[4]
+; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mul v22.8h, v16.8h, v22.8h
+; CHECK-GI-NEXT: smov w14, v20.h[3]
+; CHECK-GI-NEXT: mov v21.h[7], w8
+; CHECK-GI-NEXT: ldrsb w8, [x0, #32]
+; CHECK-GI-NEXT: mov v19.h[7], w12
+; CHECK-GI-NEXT: mov v23.h[7], w9
+; CHECK-GI-NEXT: ldrsb w9, [x1, #32]
+; CHECK-GI-NEXT: fmov s7, w10
+; CHECK-GI-NEXT: smov w10, v20.h[2]
+; CHECK-GI-NEXT: smov w12, v20.h[5]
+; CHECK-GI-NEXT: fmov s16, w11
+; CHECK-GI-NEXT: mul w9, w9, w8
+; CHECK-GI-NEXT: smov w15, v22.h[4]
+; CHECK-GI-NEXT: smov w17, v22.h[5]
+; CHECK-GI-NEXT: mul v24.8h, v18.8h, v21.8h
+; CHECK-GI-NEXT: mov v7.s[1], w13
+; CHECK-GI-NEXT: smov w13, v22.h[0]
+; CHECK-GI-NEXT: mul v18.8h, v19.8h, v23.8h
+; CHECK-GI-NEXT: smov w16, v22.h[1]
+; CHECK-GI-NEXT: smov w8, v20.h[7]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: mov v16.s[1], w12
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: fmov s19, w15
+; CHECK-GI-NEXT: smov w15, v22.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
+; CHECK-GI-NEXT: smov w11, v24.h[0]
+; CHECK-GI-NEXT: mov v7.s[2], w10
+; CHECK-GI-NEXT: smov w10, v20.h[6]
+; CHECK-GI-NEXT: smov w12, v24.h[1]
+; CHECK-GI-NEXT: smov w0, v18.h[4]
+; CHECK-GI-NEXT: fmov s17, w13
+; CHECK-GI-NEXT: mov v19.s[1], w17
+; CHECK-GI-NEXT: smov w17, v18.h[0]
+; CHECK-GI-NEXT: smov w18, v18.h[1]
+; CHECK-GI-NEXT: smov w13, v22.h[2]
; CHECK-GI-NEXT: mov v3.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: fmov s20, w15
-; CHECK-GI-NEXT: smov w15, v25.b[13]
+; CHECK-GI-NEXT: fmov s20, w11
+; CHECK-GI-NEXT: smov w11, v24.h[4]
+; CHECK-GI-NEXT: mov v7.s[3], w14
+; CHECK-GI-NEXT: smov w14, v24.h[5]
+; CHECK-GI-NEXT: mov v17.s[1], w16
+; CHECK-GI-NEXT: smov w16, v24.h[2]
+; CHECK-GI-NEXT: mov v19.s[2], w15
+; CHECK-GI-NEXT: smov w15, v18.h[5]
+; CHECK-GI-NEXT: fmov s23, w0
+; CHECK-GI-NEXT: mov v20.s[1], w12
+; CHECK-GI-NEXT: mov v16.s[2], w10
+; CHECK-GI-NEXT: smov w10, v22.h[3]
+; CHECK-GI-NEXT: fmov s21, w11
+; CHECK-GI-NEXT: smov w11, v22.h[7]
+; CHECK-GI-NEXT: fmov s22, w17
+; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: fmov s19, w10
-; CHECK-GI-NEXT: mov v7.s[2], w12
-; CHECK-GI-NEXT: smov w12, v21.b[10]
-; CHECK-GI-NEXT: mov v18.s[1], w14
-; CHECK-GI-NEXT: smov w14, v25.b[5]
-; CHECK-GI-NEXT: mov v17.s[2], w8
-; CHECK-GI-NEXT: smov w8, v21.b[11]
-; CHECK-GI-NEXT: smov w10, v21.b[14]
-; CHECK-GI-NEXT: mov v5.s[2], wzr
-; CHECK-GI-NEXT: mov v19.s[1], w11
-; CHECK-GI-NEXT: smov w11, v25.b[2]
-; CHECK-GI-NEXT: mov v6.s[2], wzr
-; CHECK-GI-NEXT: mov v16.s[2], w12
-; CHECK-GI-NEXT: smov w12, v25.b[8]
-; CHECK-GI-NEXT: mov v7.s[3], w9
-; CHECK-GI-NEXT: mov v20.s[1], w14
-; CHECK-GI-NEXT: smov w14, v21.b[15]
-; CHECK-GI-NEXT: smov w9, v25.b[9]
-; CHECK-GI-NEXT: mov v17.s[3], w13
-; CHECK-GI-NEXT: smov w13, v25.b[12]
-; CHECK-GI-NEXT: mov v0.s[2], wzr
-; CHECK-GI-NEXT: mov v18.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[0]
-; CHECK-GI-NEXT: mov v19.s[2], w10
-; CHECK-GI-NEXT: fmov s21, w12
-; CHECK-GI-NEXT: smov w12, v26.b[1]
-; CHECK-GI-NEXT: mov v16.s[3], w8
-; CHECK-GI-NEXT: smov w8, v26.b[5]
-; CHECK-GI-NEXT: smov w10, v25.b[6]
+; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mov v23.s[1], w15
+; CHECK-GI-NEXT: smov w15, v18.h[6]
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: mov v21.s[1], w14
+; CHECK-GI-NEXT: smov w14, v24.h[6]
+; CHECK-GI-NEXT: mov v20.s[2], w16
+; CHECK-GI-NEXT: mov v22.s[1], w18
+; CHECK-GI-NEXT: smov w16, v18.h[2]
; CHECK-GI-NEXT: mov v1.s[2], wzr
-; CHECK-GI-NEXT: fmov s23, w13
-; CHECK-GI-NEXT: smov w13, v25.b[3]
; CHECK-GI-NEXT: mov v3.s[2], wzr
-; CHECK-GI-NEXT: fmov s24, w11
-; CHECK-GI-NEXT: mov v21.s[1], w9
-; CHECK-GI-NEXT: smov w9, v25.b[10]
-; CHECK-GI-NEXT: smov w11, v26.b[2]
-; CHECK-GI-NEXT: mov v19.s[3], w14
-; CHECK-GI-NEXT: smov w14, v26.b[13]
-; CHECK-GI-NEXT: mov v23.s[1], w15
-; CHECK-GI-NEXT: smov w15, v25.b[14]
-; CHECK-GI-NEXT: mov v20.s[2], w10
-; CHECK-GI-NEXT: mov v24.s[1], w12
-; CHECK-GI-NEXT: smov w12, v26.b[4]
-; CHECK-GI-NEXT: smov w10, v25.b[7]
-; CHECK-GI-NEXT: mov v21.s[2], w9
-; CHECK-GI-NEXT: smov w9, v25.b[11]
-; CHECK-GI-NEXT: mov v18.s[3], w13
-; CHECK-GI-NEXT: smov w13, v26.b[9]
; CHECK-GI-NEXT: mov v2.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
+; CHECK-GI-NEXT: mov v6.s[2], wzr
; CHECK-GI-NEXT: mov v23.s[2], w15
-; CHECK-GI-NEXT: smov w15, v25.b[15]
-; CHECK-GI-NEXT: mov v5.s[3], wzr
-; CHECK-GI-NEXT: fmov s27, w12
-; CHECK-GI-NEXT: mov v24.s[2], w11
-; CHECK-GI-NEXT: smov w11, v26.b[6]
-; CHECK-GI-NEXT: smov w12, v26.b[8]
-; CHECK-GI-NEXT: mov v21.s[3], w9
-; CHECK-GI-NEXT: smov w9, v26.b[12]
-; CHECK-GI-NEXT: mov v20.s[3], w10
-; CHECK-GI-NEXT: smov w10, v26.b[3]
-; CHECK-GI-NEXT: mov v6.s[3], wzr
-; CHECK-GI-NEXT: mov v27.s[1], w8
-; CHECK-GI-NEXT: mov v23.s[3], w15
-; CHECK-GI-NEXT: smov w15, v22.b[0]
-; CHECK-GI-NEXT: smov w8, v26.b[7]
-; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[2], w14
+; CHECK-GI-NEXT: smov w14, v18.h[3]
+; CHECK-GI-NEXT: smov w15, v18.h[7]
+; CHECK-GI-NEXT: fmov s18, w9
+; CHECK-GI-NEXT: ldr w9, [sp, #12] // 4-byte Folded Reload
+; CHECK-GI-NEXT: mov v17.s[2], w13
+; CHECK-GI-NEXT: smov w12, v24.h[3]
+; CHECK-GI-NEXT: smov w13, v24.h[7]
+; CHECK-GI-NEXT: mov v22.s[2], w16
+; CHECK-GI-NEXT: mov v0.s[2], wzr
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: fmov s25, w12
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: smov w9, v22.b[5]
-; CHECK-GI-NEXT: mov v24.s[3], w10
-; CHECK-GI-NEXT: smov w10, v22.b[1]
-; CHECK-GI-NEXT: smov w12, v26.b[10]
-; CHECK-GI-NEXT: mov v27.s[2], w11
-; CHECK-GI-NEXT: smov w11, v22.b[4]
-; CHECK-GI-NEXT: fmov s28, w15
-; CHECK-GI-NEXT: mov v25.s[1], w13
-; CHECK-GI-NEXT: smov w13, v26.b[14]
-; CHECK-GI-NEXT: mov v29.s[1], w14
-; CHECK-GI-NEXT: smov w15, v22.b[12]
-; CHECK-GI-NEXT: smov w14, v22.b[2]
; CHECK-GI-NEXT: mov v3.s[3], wzr
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[13]
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: fmov s30, w11
-; CHECK-GI-NEXT: smov w11, v22.b[6]
-; CHECK-GI-NEXT: mov v27.s[3], w8
-; CHECK-GI-NEXT: mov v25.s[2], w12
-; CHECK-GI-NEXT: mov v29.s[2], w13
-; CHECK-GI-NEXT: smov w13, v26.b[11]
-; CHECK-GI-NEXT: fmov s31, w15
-; CHECK-GI-NEXT: smov w15, v26.b[15]
-; CHECK-GI-NEXT: smov w12, v22.b[9]
-; CHECK-GI-NEXT: mov v30.s[1], w9
-; CHECK-GI-NEXT: smov w9, v22.b[8]
-; CHECK-GI-NEXT: mov v28.s[2], w14
-; CHECK-GI-NEXT: ldrsb w14, [x1, #32]
-; CHECK-GI-NEXT: smov w8, v22.b[15]
-; CHECK-GI-NEXT: mul v17.4s, v17.4s, v27.4s
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: smov w10, v22.b[14]
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldrsb w13, [x0, #32]
-; CHECK-GI-NEXT: mov v29.s[3], w15
+; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v30.s[2], w11
-; CHECK-GI-NEXT: fmov s26, w9
-; CHECK-GI-NEXT: smov w9, v22.b[7]
-; CHECK-GI-NEXT: smov w11, v22.b[3]
-; CHECK-GI-NEXT: add v5.4s, v5.4s, v6.4s
-; CHECK-GI-NEXT: mla v17.4s, v7.4s, v24.4s
-; CHECK-GI-NEXT: mov v31.s[2], w10
+; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: mov v16.s[3], w8
+; CHECK-GI-NEXT: mov v17.s[3], w10
+; CHECK-GI-NEXT: mov v19.s[3], w11
+; CHECK-GI-NEXT: mov v20.s[3], w12
+; CHECK-GI-NEXT: mov v21.s[3], w13
+; CHECK-GI-NEXT: mov v22.s[3], w14
+; CHECK-GI-NEXT: mov v23.s[3], w15
+; CHECK-GI-NEXT: mov v0.s[3], wzr
; CHECK-GI-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-NEXT: mov v26.s[1], w12
-; CHECK-GI-NEXT: smov w12, v22.b[10]
-; CHECK-GI-NEXT: mul v19.4s, v19.4s, v29.4s
-; CHECK-GI-NEXT: mov v30.s[3], w9
-; CHECK-GI-NEXT: mul w9, w14, w13
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: mov v28.s[3], w11
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: mov v31.s[3], w8
-; CHECK-GI-NEXT: smov w8, v22.b[11]
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: mov v26.s[2], w12
-; CHECK-GI-NEXT: mla v19.4s, v16.4s, v25.4s
-; CHECK-GI-NEXT: mul v20.4s, v20.4s, v30.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mul v22.4s, v23.4s, v31.4s
-; CHECK-GI-NEXT: mov v26.s[3], w8
-; CHECK-GI-NEXT: add v3.4s, v17.4s, v19.4s
-; CHECK-GI-NEXT: mla v20.4s, v18.4s, v28.4s
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mla v22.4s, v21.4s, v26.4s
-; CHECK-GI-NEXT: mov v8.s[3], wzr
-; CHECK-GI-NEXT: add v4.4s, v20.4s, v22.4s
-; CHECK-GI-NEXT: add v0.4s, v8.4s, v0.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v4.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
+; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: add v4.4s, v7.4s, v16.4s
+; CHECK-GI-NEXT: add v5.4s, v17.4s, v19.4s
+; CHECK-GI-NEXT: add v6.4s, v20.4s, v21.4s
+; CHECK-GI-NEXT: add v7.4s, v22.4s, v23.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v2.4s, v0.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v18.s[3], wzr
+; CHECK-GI-NEXT: add v2.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v3.4s, v6.4s, v7.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: add v0.4s, v18.4s, v0.4s
+; CHECK-GI-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s0
-; CHECK-GI-NEXT: add w0, w8, w2
-; CHECK-GI-NEXT: ldr d8, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT: add w0, w8, w9
+; CHECK-GI-NEXT: add sp, sp, #112
; CHECK-GI-NEXT: ret
entry:
%0 = load <33 x i8>, ptr %a
@@ -4845,13 +5844,12 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
;
; CHECK-GI-LABEL: test_sdot_v33i8_double:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sub sp, sp, #96
-; CHECK-GI-NEXT: stp d15, d14, [sp, #16] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d13, d12, [sp, #32] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d11, d10, [sp, #48] // 16-byte Folded Spill
-; CHECK-GI-NEXT: stp d9, d8, [sp, #64] // 16-byte Folded Spill
-; CHECK-GI-NEXT: str x29, [sp, #80] // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 96
+; CHECK-GI-NEXT: stp d15, d14, [sp, #-80]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d13, d12, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d11, d10, [sp, #32] // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp d9, d8, [sp, #48] // 16-byte Folded Spill
+; CHECK-GI-NEXT: str x29, [sp, #64] // 8-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 80
; CHECK-GI-NEXT: .cfi_offset w29, -16
; CHECK-GI-NEXT: .cfi_offset b8, -24
; CHECK-GI-NEXT: .cfi_offset b9, -32
@@ -4861,508 +5859,762 @@ define i32 @test_sdot_v33i8_double(<33 x i8> %a, <33 x i8> %b, <33 x i8> %c, <33
; CHECK-GI-NEXT: .cfi_offset b13, -64
; CHECK-GI-NEXT: .cfi_offset b14, -72
; CHECK-GI-NEXT: .cfi_offset b15, -80
-; CHECK-GI-NEXT: sxtb w8, w0
-; CHECK-GI-NEXT: sxtb w9, w1
-; CHECK-GI-NEXT: sxtb w10, w2
-; CHECK-GI-NEXT: sxtb w11, w4
-; CHECK-GI-NEXT: sxtb w12, w5
-; CHECK-GI-NEXT: sxtb w13, w7
-; CHECK-GI-NEXT: fmov s28, w8
+; CHECK-GI-NEXT: lsl w8, w0, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #80]
+; CHECK-GI-NEXT: lsl w11, w1, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #88]
+; CHECK-GI-NEXT: ldr w13, [sp, #128]
+; CHECK-GI-NEXT: ldr w14, [sp, #136]
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w2, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s22, w12
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #152]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w16, w7, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: fmov s23, w10
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w11, w3, #8
+; CHECK-GI-NEXT: mov v22.h[1], w8
; CHECK-GI-NEXT: ldr w8, [sp, #96]
-; CHECK-GI-NEXT: fmov s0, wzr
-; CHECK-GI-NEXT: fmov s25, w11
-; CHECK-GI-NEXT: sxtb w11, w6
-; CHECK-GI-NEXT: ldr w14, [sp, #528]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s18, wzr
-; CHECK-GI-NEXT: fmov s20, wzr
-; CHECK-GI-NEXT: mov v28.s[1], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #176]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: mov v23.h[1], w9
; CHECK-GI-NEXT: ldr w9, [sp, #104]
-; CHECK-GI-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-GI-NEXT: fmov s24, w8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #224]
+; CHECK-GI-NEXT: mov v22.h[2], w10
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w11, w4, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v23.h[2], w8
; CHECK-GI-NEXT: ldr w8, [sp, #112]
-; CHECK-GI-NEXT: mov v25.s[1], w12
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #136]
-; CHECK-GI-NEXT: mov v18.s[1], wzr
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v20.s[1], wzr
; CHECK-GI-NEXT: fmov s19, wzr
-; CHECK-GI-NEXT: mov v28.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w3
-; CHECK-GI-NEXT: mov v24.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #128]
-; CHECK-GI-NEXT: mov v25.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #168]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v18.s[2], wzr
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
; CHECK-GI-NEXT: fmov s21, wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v20.s[2], wzr
-; CHECK-GI-NEXT: mov v28.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #160]
-; CHECK-GI-NEXT: mov v24.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #120]
-; CHECK-GI-NEXT: fmov s30, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #144]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v25.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #200]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[1], wzr
-; CHECK-GI-NEXT: fmov s22, w10
-; CHECK-GI-NEXT: mov v30.s[1], w12
-; CHECK-GI-NEXT: ldr w10, [sp, #176]
-; CHECK-GI-NEXT: mov v24.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #224]
-; CHECK-GI-NEXT: ldr w12, [sp, #152]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mov v22.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #192]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v30.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #232]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: fmov s23, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #240]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v18.s[3], wzr
-; CHECK-GI-NEXT: mov v20.s[3], wzr
-; CHECK-GI-NEXT: mov v22.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #184]
-; CHECK-GI-NEXT: fmov s26, w11
-; CHECK-GI-NEXT: mov v23.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #256]
-; CHECK-GI-NEXT: ldr w11, [sp, #208]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v30.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #264]
-; CHECK-GI-NEXT: mov v26.s[1], w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v22.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #296]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s29, w9
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w13, [sp, #216]
-; CHECK-GI-NEXT: sxtb w9, w10
-; CHECK-GI-NEXT: mov v23.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #248]
-; CHECK-GI-NEXT: mov v26.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #304]
-; CHECK-GI-NEXT: ldr w10, [sp, #272]
-; CHECK-GI-NEXT: fmov s31, w9
-; CHECK-GI-NEXT: mov v29.s[1], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #312]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: ldr w12, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #144]
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s16, wzr
-; CHECK-GI-NEXT: mov v31.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #328]
-; CHECK-GI-NEXT: mov v23.s[3], w8
-; CHECK-GI-NEXT: sxtb w8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #360]
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: sxtb w10, w11
-; CHECK-GI-NEXT: mov v26.s[3], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #336]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #368]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v31.s[2], w8
-; CHECK-GI-NEXT: fmov s0, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #320]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: fmov s12, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v29.s[3], w12
-; CHECK-GI-NEXT: ldr w9, [sp, #376]
-; CHECK-GI-NEXT: mov v0.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #344]
-; CHECK-GI-NEXT: ldr w8, [sp, #288]
-; CHECK-GI-NEXT: mov v12.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #392]
-; CHECK-GI-NEXT: mov v31.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #424]
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: ldr w13, [sp, #400]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #432]
-; CHECK-GI-NEXT: fmov s13, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v12.s[2], w9
-; CHECK-GI-NEXT: fmov s8, w10
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w10, [sp, #440]
-; CHECK-GI-NEXT: ldr w11, [sp, #384]
-; CHECK-GI-NEXT: ldr w9, [sp, #352]
+; CHECK-GI-NEXT: fmov s18, wzr
; CHECK-GI-NEXT: fmov s17, wzr
-; CHECK-GI-NEXT: mov v13.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #408]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w12, [sp, #456]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s3, wzr
-; CHECK-GI-NEXT: mov v12.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #488]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v13.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #496]
-; CHECK-GI-NEXT: mov v0.s[3], w9
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #416]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w9, [sp, #464]
-; CHECK-GI-NEXT: fmov s14, w12
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: fmov s9, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #504]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w12, [sp, #448]
-; CHECK-GI-NEXT: mul v27.4s, v25.4s, v0.4s
-; CHECK-GI-NEXT: mov v13.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #560]
-; CHECK-GI-NEXT: sxtb w15, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #568]
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #520]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #472]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s10, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #552]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s15, w13
-; CHECK-GI-NEXT: mov v8.s[3], w12
-; CHECK-GI-NEXT: sxtb w12, w14
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: mov v14.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #480]
-; CHECK-GI-NEXT: mov v10.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #576]
-; CHECK-GI-NEXT: mov v9.s[2], w15
-; CHECK-GI-NEXT: mul w8, w8, w10
-; CHECK-GI-NEXT: mov v15.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #512]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w10, [sp, #584]
-; CHECK-GI-NEXT: ldr w13, [sp, #536]
-; CHECK-GI-NEXT: mla v27.4s, v28.4s, v31.4s
-; CHECK-GI-NEXT: mul v30.4s, v30.4s, v13.4s
-; CHECK-GI-NEXT: mov v10.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #592]
-; CHECK-GI-NEXT: fmov s25, w8
-; CHECK-GI-NEXT: mov v14.s[3], w9
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #624]
-; CHECK-GI-NEXT: sxtb w13, w13
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #600]
-; CHECK-GI-NEXT: mla v30.4s, v24.4s, v12.4s
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v10.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #632]
-; CHECK-GI-NEXT: fmov s0, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #656]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s28, w11
-; CHECK-GI-NEXT: ldr w11, [sp, #688]
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v15.s[2], w13
-; CHECK-GI-NEXT: ldr w13, [sp, #544]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v0.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #664]
-; CHECK-GI-NEXT: mov v28.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #696]
-; CHECK-GI-NEXT: fmov s11, w8
-; CHECK-GI-NEXT: fmov s31, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #672]
-; CHECK-GI-NEXT: ldr w8, [sp, #616]
-; CHECK-GI-NEXT: mov v11.s[1], w9
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #608]
-; CHECK-GI-NEXT: mov v31.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #704]
-; CHECK-GI-NEXT: ldr w9, [sp, #640]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v24.4s, v26.4s, v14.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #712]
-; CHECK-GI-NEXT: mov v0.s[2], w12
-; CHECK-GI-NEXT: mov v31.s[2], w10
-; CHECK-GI-NEXT: ldr w12, [sp, #648]
-; CHECK-GI-NEXT: mov v28.s[2], w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: ldr w10, [sp, #720]
-; CHECK-GI-NEXT: ldr w9, [sp, #680]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mul v26.4s, v29.4s, v15.4s
-; CHECK-GI-NEXT: mla v24.4s, v22.4s, v8.4s
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v0.s[3], w8
-; CHECK-GI-NEXT: mov v31.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #784]
-; CHECK-GI-NEXT: mov v28.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #752]
-; CHECK-GI-NEXT: fmov s13, w10
-; CHECK-GI-NEXT: ldr w10, [sp, #792]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mov v11.s[3], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #760]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w8, [sp, #728]
-; CHECK-GI-NEXT: fmov s14, w11
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w11, [sp, #744]
-; CHECK-GI-NEXT: fmov s12, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #824]
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: mla v26.4s, v23.4s, v9.4s
-; CHECK-GI-NEXT: ldr w13, [sp, #984]
-; CHECK-GI-NEXT: mov v14.s[1], w10
-; CHECK-GI-NEXT: sxtb w10, w12
-; CHECK-GI-NEXT: mov v13.s[1], w8
-; CHECK-GI-NEXT: mov v12.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #832]
-; CHECK-GI-NEXT: ldr w8, [sp, #736]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: ldr w12, [sp, #768]
-; CHECK-GI-NEXT: ldr w10, [sp, #800]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[3], w9
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #120]
+; CHECK-GI-NEXT: fmov s20, wzr
; CHECK-GI-NEXT: fmov s6, wzr
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: sxtb w10, w10
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: mov v22.h[4], w11
+; CHECK-GI-NEXT: lsl w11, w5, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: fmov s7, wzr
; CHECK-GI-NEXT: fmov s2, wzr
-; CHECK-GI-NEXT: mov v29.s[1], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #840]
-; CHECK-GI-NEXT: mov v13.s[2], w8
-; CHECK-GI-NEXT: mov v12.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #808]
-; CHECK-GI-NEXT: mov v14.s[2], w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: ldr w8, [sp, #776]
-; CHECK-GI-NEXT: ldr w10, [sp, #848]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s5, wzr
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v23.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #160]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #168]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w8, w8, #8
; CHECK-GI-NEXT: fmov s4, wzr
-; CHECK-GI-NEXT: mov v29.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #856]
-; CHECK-GI-NEXT: mov v13.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #864]
-; CHECK-GI-NEXT: mov v14.s[3], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #888]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: fmov s7, wzr
-; CHECK-GI-NEXT: fmov s15, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #920]
-; CHECK-GI-NEXT: mov v12.s[3], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #872]
-; CHECK-GI-NEXT: mov v29.s[3], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #896]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: fmov s22, w12
-; CHECK-GI-NEXT: ldr w12, [sp, #928]
-; CHECK-GI-NEXT: mov v15.s[1], w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: fmov s8, w9
-; CHECK-GI-NEXT: ldr w9, [sp, #952]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: ldr w11, [sp, #904]
-; CHECK-GI-NEXT: mov v22.s[1], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #936]
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: mov v19.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mov v15.s[2], w8
-; CHECK-GI-NEXT: ldr w8, [sp, #960]
-; CHECK-GI-NEXT: mov v8.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #880]
-; CHECK-GI-NEXT: fmov s23, w9
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w9, [sp, #944]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v22.s[2], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #912]
-; CHECK-GI-NEXT: mov v8.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #968]
-; CHECK-GI-NEXT: mov v23.s[1], w8
-; CHECK-GI-NEXT: mov v15.s[3], w12
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w12, w13
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: add v18.4s, v18.4s, v20.4s
-; CHECK-GI-NEXT: mov v22.s[3], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #992]
-; CHECK-GI-NEXT: fmov s9, w12
-; CHECK-GI-NEXT: mov v23.s[2], w10
-; CHECK-GI-NEXT: ldr w10, [sp, #1048]
-; CHECK-GI-NEXT: ldr w12, [sp, #1056]
-; CHECK-GI-NEXT: mul v0.4s, v0.4s, v15.4s
-; CHECK-GI-NEXT: sxtb w13, w11
-; CHECK-GI-NEXT: mov v8.s[3], w9
-; CHECK-GI-NEXT: sxtb w11, w10
-; CHECK-GI-NEXT: ldr w9, [sp, #1000]
-; CHECK-GI-NEXT: sxtb w12, w12
-; CHECK-GI-NEXT: mov v9.s[1], w13
-; CHECK-GI-NEXT: ldr w10, [sp, #1016]
-; CHECK-GI-NEXT: ldr w8, [sp, #816]
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: fmov s3, wzr
+; CHECK-GI-NEXT: mov v24.h[1], w12
+; CHECK-GI-NEXT: lsl w12, w6, #8
+; CHECK-GI-NEXT: mov v22.h[5], w11
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.h[5], w9
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #184]
+; CHECK-GI-NEXT: ldr w9, [sp, #192]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s5, wzr
; CHECK-GI-NEXT: fmov s1, wzr
+; CHECK-GI-NEXT: mov v24.h[2], w8
+; CHECK-GI-NEXT: mov v22.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #208]
+; CHECK-GI-NEXT: mov v23.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #216]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #200]
+; CHECK-GI-NEXT: fmov s0, wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v19.s[1], wzr
+; CHECK-GI-NEXT: mov v24.h[3], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #280]
+; CHECK-GI-NEXT: mov v22.h[7], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #288]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v23.h[7], w10
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s27, w12
+; CHECK-GI-NEXT: ldr w10, [sp, #232]
+; CHECK-GI-NEXT: sbfx w16, w14, #8, #8
+; CHECK-GI-NEXT: mov v24.h[4], w15
+; CHECK-GI-NEXT: lsl w15, w11, #8
+; CHECK-GI-NEXT: sbfx w14, w18, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #296]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #344]
+; CHECK-GI-NEXT: mov v27.h[1], w13
+; CHECK-GI-NEXT: lsl w13, w17, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #240]
+; CHECK-GI-NEXT: sbfx w17, w10, #8, #8
+; CHECK-GI-NEXT: mov v25.h[1], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #352]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.h[5], w15
+; CHECK-GI-NEXT: mov v27.h[2], w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #304]
+; CHECK-GI-NEXT: fmov s26, w16
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #248]
+; CHECK-GI-NEXT: mov v25.h[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #360]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v24.h[6], w9
+; CHECK-GI-NEXT: lsl w16, w11, #8
+; CHECK-GI-NEXT: mov v26.h[1], w13
+; CHECK-GI-NEXT: mov v27.h[3], w17
+; CHECK-GI-NEXT: sbfx w13, w14, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #312]
+; CHECK-GI-NEXT: ldr w17, [sp, #328]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #256]
+; CHECK-GI-NEXT: ldr w11, [sp, #264]
+; CHECK-GI-NEXT: mov v25.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #368]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v26.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #320]
+; CHECK-GI-NEXT: mov v27.h[4], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #408]
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w12, w13, #8, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #376]
+; CHECK-GI-NEXT: mov v25.h[4], w9
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w15, #8, #8
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: mov v26.h[3], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #416]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w16, w17, #8
+; CHECK-GI-NEXT: mov v27.h[5], w9
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v25.h[5], w14
+; CHECK-GI-NEXT: fmov s29, w15
+; CHECK-GI-NEXT: ldr w14, [sp, #384]
+; CHECK-GI-NEXT: ldr w15, [sp, #472]
+; CHECK-GI-NEXT: mov v26.h[4], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #424]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v29.h[1], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #480]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v25.h[6], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #432]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v26.h[5], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #392]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v29.h[2], w13
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #336]
+; CHECK-GI-NEXT: ldr w13, [sp, #488]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w15, [sp, #440]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v28.h[1], w12
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v29.h[3], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #496]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #400]
+; CHECK-GI-NEXT: mov v26.h[6], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #448]
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v28.h[2], w13
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v25.h[7], w9
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #456]
+; CHECK-GI-NEXT: ldr w15, [sp, #504]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: sbfx w9, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w12, w14, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: mov v28.h[3], w16
+; CHECK-GI-NEXT: ldr w15, [sp, #512]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mul v30.8h, v22.8h, v25.8h
+; CHECK-GI-NEXT: mov v26.h[7], w9
+; CHECK-GI-NEXT: mov v29.h[5], w12
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w9, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w11, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w13, #8, #8
+; CHECK-GI-NEXT: lsl w13, w15, #8
+; CHECK-GI-NEXT: ldr w17, [sp, #464]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[4], w9
+; CHECK-GI-NEXT: mov v27.h[6], w10
+; CHECK-GI-NEXT: ldr w16, [sp, #520]
+; CHECK-GI-NEXT: sbfx w10, w13, #8, #8
+; CHECK-GI-NEXT: smov w13, v30.h[0]
+; CHECK-GI-NEXT: mov v24.h[7], w8
+; CHECK-GI-NEXT: lsl w8, w17, #8
+; CHECK-GI-NEXT: mov v29.h[6], w11
+; CHECK-GI-NEXT: mul v26.8h, v23.8h, v26.8h
+; CHECK-GI-NEXT: lsl w15, w16, #8
+; CHECK-GI-NEXT: smov w16, v30.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #528]
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v28.h[5], w10
+; CHECK-GI-NEXT: mov v27.h[7], w14
+; CHECK-GI-NEXT: fmov s22, w13
+; CHECK-GI-NEXT: sbfx w10, w15, #8, #8
+; CHECK-GI-NEXT: smov w14, v30.h[4]
+; CHECK-GI-NEXT: mov v29.h[7], w8
+; CHECK-GI-NEXT: smov w15, v26.h[0]
+; CHECK-GI-NEXT: smov w13, v30.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #544]
+; CHECK-GI-NEXT: ldr w11, [sp, #552]
+; CHECK-GI-NEXT: mov v22.s[1], w16
+; CHECK-GI-NEXT: smov w16, v26.h[4]
+; CHECK-GI-NEXT: mov v28.h[6], w10
+; CHECK-GI-NEXT: smov w10, v26.h[1]
+; CHECK-GI-NEXT: fmov s23, w14
+; CHECK-GI-NEXT: smov w14, v26.h[5]
+; CHECK-GI-NEXT: mul v29.8h, v24.8h, v29.8h
+; CHECK-GI-NEXT: fmov s24, w15
+; CHECK-GI-NEXT: smov w15, v26.h[2]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: smov w8, v30.h[5]
+; CHECK-GI-NEXT: smov w17, v30.h[7]
+; CHECK-GI-NEXT: fmov s25, w16
+; CHECK-GI-NEXT: mov v22.s[2], w13
+; CHECK-GI-NEXT: smov w13, v30.h[3]
+; CHECK-GI-NEXT: mov v24.s[1], w10
+; CHECK-GI-NEXT: smov w16, v26.h[6]
+; CHECK-GI-NEXT: sbfx w10, w12, #8, #8
+; CHECK-GI-NEXT: smov w18, v29.h[0]
+; CHECK-GI-NEXT: smov w0, v29.h[1]
+; CHECK-GI-NEXT: ldr w12, [sp, #560]
+; CHECK-GI-NEXT: mov v25.s[1], w14
+; CHECK-GI-NEXT: smov w14, v26.h[7]
+; CHECK-GI-NEXT: mov v28.h[7], w10
+; CHECK-GI-NEXT: mov v22.s[3], w13
+; CHECK-GI-NEXT: smov w13, v26.h[3]
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v24.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[2]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: fmov s26, w18
+; CHECK-GI-NEXT: mov v23.s[1], w8
+; CHECK-GI-NEXT: smov w8, v30.h[6]
+; CHECK-GI-NEXT: mov v25.s[2], w16
+; CHECK-GI-NEXT: lsl w16, w9, #8
+; CHECK-GI-NEXT: mul v31.8h, v27.8h, v28.8h
+; CHECK-GI-NEXT: ldr w10, [sp, #568]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #584]
+; CHECK-GI-NEXT: mov v24.s[3], w13
+; CHECK-GI-NEXT: smov w13, v29.h[4]
+; CHECK-GI-NEXT: mov v26.s[1], w0
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v23.s[2], w8
+; CHECK-GI-NEXT: mov v25.s[3], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #608]
+; CHECK-GI-NEXT: ldr w8, [sp, #576]
+; CHECK-GI-NEXT: fmov s8, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #616]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: fmov s27, w13
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v26.s[2], w15
+; CHECK-GI-NEXT: smov w15, v29.h[5]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #624]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v8.h[1], w11
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v23.s[3], w17
+; CHECK-GI-NEXT: fmov s9, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #632]
+; CHECK-GI-NEXT: smov w17, v31.h[1]
+; CHECK-GI-NEXT: mov v27.s[1], w15
+; CHECK-GI-NEXT: smov w15, v31.h[0]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v8.h[2], w12
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v9.h[1], w16
+; CHECK-GI-NEXT: smov w16, v31.h[2]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #592]
+; CHECK-GI-NEXT: ldr w12, [sp, #600]
+; CHECK-GI-NEXT: fmov s28, w15
+; CHECK-GI-NEXT: smov w15, v29.h[6]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: mov v8.h[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #640]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v9.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #672]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v28.s[1], w17
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v27.s[2], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #680]
+; CHECK-GI-NEXT: mov v8.h[4], w8
+; CHECK-GI-NEXT: smov w8, v31.h[4]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: mov v9.h[3], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #688]
+; CHECK-GI-NEXT: mov v28.s[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #648]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #656]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: fmov s30, w8
+; CHECK-GI-NEXT: sbfx w8, w10, #8, #8
+; CHECK-GI-NEXT: smov w10, v31.h[5]
+; CHECK-GI-NEXT: mov v8.h[5], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #696]
+; CHECK-GI-NEXT: mov v10.h[1], w15
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v9.h[4], w8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: ldr w8, [sp, #704]
+; CHECK-GI-NEXT: ldr w15, [sp, #664]
+; CHECK-GI-NEXT: ldr w17, [sp, #768]
+; CHECK-GI-NEXT: mov v30.s[1], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #744]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: mov v10.h[2], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #736]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v9.h[5], w16
+; CHECK-GI-NEXT: mov v8.h[6], w11
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #712]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #720]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v10.h[3], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #752]
+; CHECK-GI-NEXT: mov v8.h[7], w12
+; CHECK-GI-NEXT: sbfx w12, w8, #8, #8
+; CHECK-GI-NEXT: lsl w18, w16, #8
+; CHECK-GI-NEXT: fmov s11, w13
+; CHECK-GI-NEXT: ldr w13, [sp, #760]
+; CHECK-GI-NEXT: ldr w8, [sp, #784]
+; CHECK-GI-NEXT: mov v21.s[1], wzr
; CHECK-GI-NEXT: mov v16.s[1], wzr
-; CHECK-GI-NEXT: mla v0.4s, v10.4s, v29.4s
-; CHECK-GI-NEXT: fmov s10, w11
-; CHECK-GI-NEXT: sxtb w10, w10
-; CHECK-GI-NEXT: ldr w11, [sp, #1024]
-; CHECK-GI-NEXT: mul v20.4s, v11.4s, v8.4s
-; CHECK-GI-NEXT: ldr q8, [sp] // 16-byte Folded Reload
-; CHECK-GI-NEXT: mov v9.s[2], w9
-; CHECK-GI-NEXT: ldr w9, [sp, #1008]
-; CHECK-GI-NEXT: fmov s29, w10
-; CHECK-GI-NEXT: mov v10.s[1], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1064]
-; CHECK-GI-NEXT: sxtb w11, w11
-; CHECK-GI-NEXT: sxtb w9, w9
+; CHECK-GI-NEXT: mov v18.s[1], wzr
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v10.h[4], w12
+; CHECK-GI-NEXT: sbfx w12, w15, #8, #8
+; CHECK-GI-NEXT: mov v11.h[1], w10
+; CHECK-GI-NEXT: sbfx w10, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w9, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: ldr w9, [sp, #776]
+; CHECK-GI-NEXT: lsl w8, w8, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v9.h[6], w10
+; CHECK-GI-NEXT: lsl w10, w11, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #808]
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w8, w8, #8, #8
+; CHECK-GI-NEXT: mov v11.h[2], w14
+; CHECK-GI-NEXT: ldr w14, [sp, #816]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
; CHECK-GI-NEXT: mov v17.s[1], wzr
-; CHECK-GI-NEXT: mov v3.s[1], wzr
-; CHECK-GI-NEXT: sxtb w12, w12
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: mov v9.h[7], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #824]
+; CHECK-GI-NEXT: sbfx w16, w11, #8, #8
+; CHECK-GI-NEXT: mov v10.h[5], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #832]
+; CHECK-GI-NEXT: mov v11.h[3], w13
+; CHECK-GI-NEXT: sbfx w15, w14, #8, #8
+; CHECK-GI-NEXT: lsl w14, w17, #8
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: ldr w16, [sp, #872]
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #840]
+; CHECK-GI-NEXT: sbfx w13, w18, #8, #8
+; CHECK-GI-NEXT: sbfx w17, w12, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: ldr w12, [sp, #856]
+; CHECK-GI-NEXT: mov v12.h[1], w15
+; CHECK-GI-NEXT: mov v11.h[4], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #880]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v10.h[6], w13
+; CHECK-GI-NEXT: ldr w13, [sp, #848]
+; CHECK-GI-NEXT: lsl w14, w15, #8
+; CHECK-GI-NEXT: sbfx w15, w16, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #888]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: mov v20.s[1], wzr
+; CHECK-GI-NEXT: mov v12.h[2], w17
+; CHECK-GI-NEXT: lsl w17, w10, #8
+; CHECK-GI-NEXT: mov v11.h[5], w9
+; CHECK-GI-NEXT: fmov s13, w15
+; CHECK-GI-NEXT: ldr w9, [sp, #936]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: lsl w9, w9, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: ldr w10, [sp, #864]
+; CHECK-GI-NEXT: mov v12.h[3], w15
+; CHECK-GI-NEXT: mov v11.h[6], w8
+; CHECK-GI-NEXT: sbfx w8, w11, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #1000]
+; CHECK-GI-NEXT: mov v13.h[1], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #944]
+; CHECK-GI-NEXT: sbfx w9, w9, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #896]
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: mov v12.h[4], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1008]
+; CHECK-GI-NEXT: fmov s14, w9
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v13.h[2], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #952]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: lsl w17, w8, #8
+; CHECK-GI-NEXT: smov w8, v29.h[3]
+; CHECK-GI-NEXT: smov w9, v29.h[7]
+; CHECK-GI-NEXT: fmov s29, w11
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v14.h[1], w15
+; CHECK-GI-NEXT: sbfx w15, w17, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #904]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: mov v12.h[5], w13
+; CHECK-GI-NEXT: mov v13.h[3], w14
+; CHECK-GI-NEXT: mov v29.h[1], w15
+; CHECK-GI-NEXT: ldr w15, [sp, #960]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #1016]
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #1024]
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v14.h[2], w16
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: ldr w16, [sp, #912]
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[4], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #968]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[6], w12
+; CHECK-GI-NEXT: ldr w12, [sp, #976]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mov v14.h[3], w15
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v29.h[2], w14
+; CHECK-GI-NEXT: ldr w15, [sp, #1032]
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: ldr w14, [sp, #920]
+; CHECK-GI-NEXT: mov v26.s[3], w8
+; CHECK-GI-NEXT: sbfx w16, w16, #8, #8
+; CHECK-GI-NEXT: lsl w15, w15, #8
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: mov v14.h[4], w11
+; CHECK-GI-NEXT: mov v29.h[3], w13
+; CHECK-GI-NEXT: ldr w11, [sp, #984]
+; CHECK-GI-NEXT: lsl w14, w14, #8
+; CHECK-GI-NEXT: sbfx w15, w15, #8, #8
+; CHECK-GI-NEXT: mov v13.h[5], w16
+; CHECK-GI-NEXT: ldr w16, [sp, #1040]
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #928]
+; CHECK-GI-NEXT: sbfx w14, w14, #8, #8
+; CHECK-GI-NEXT: mov v12.h[7], w10
+; CHECK-GI-NEXT: mov v27.s[3], w9
+; CHECK-GI-NEXT: mov v14.h[5], w12
+; CHECK-GI-NEXT: mov v29.h[4], w15
+; CHECK-GI-NEXT: lsl w16, w16, #8
+; CHECK-GI-NEXT: sbfx w10, w11, #8, #8
+; CHECK-GI-NEXT: lsl w13, w13, #8
+; CHECK-GI-NEXT: mov v13.h[6], w14
+; CHECK-GI-NEXT: ldr w12, [sp, #1048]
+; CHECK-GI-NEXT: sbfx w14, w16, #8, #8
+; CHECK-GI-NEXT: ldr w11, [sp, #728]
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: mul v15.8h, v8.8h, v12.8h
+; CHECK-GI-NEXT: smov w16, v31.h[6]
+; CHECK-GI-NEXT: mov v14.h[6], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #992]
+; CHECK-GI-NEXT: mov v29.h[5], w14
+; CHECK-GI-NEXT: lsl w12, w12, #8
+; CHECK-GI-NEXT: lsl w11, w11, #8
+; CHECK-GI-NEXT: mov v13.h[7], w13
+; CHECK-GI-NEXT: lsl w10, w10, #8
+; CHECK-GI-NEXT: ldr w13, [sp, #792]
+; CHECK-GI-NEXT: ldr w14, [sp, #1056]
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w11, w11, #8, #8
+; CHECK-GI-NEXT: mov v30.s[2], w16
+; CHECK-GI-NEXT: sbfx w10, w10, #8, #8
+; CHECK-GI-NEXT: smov w8, v15.h[1]
+; CHECK-GI-NEXT: smov w9, v15.h[5]
+; CHECK-GI-NEXT: mov v29.h[6], w12
+; CHECK-GI-NEXT: lsl w12, w13, #8
+; CHECK-GI-NEXT: lsl w13, w14, #8
+; CHECK-GI-NEXT: mov v10.h[7], w11
+; CHECK-GI-NEXT: mov v14.h[7], w10
+; CHECK-GI-NEXT: mul v12.8h, v9.8h, v13.8h
+; CHECK-GI-NEXT: sbfx w12, w12, #8, #8
+; CHECK-GI-NEXT: sbfx w13, w13, #8, #8
+; CHECK-GI-NEXT: smov w10, v15.h[0]
+; CHECK-GI-NEXT: smov w11, v15.h[4]
+; CHECK-GI-NEXT: smov w14, v31.h[7]
+; CHECK-GI-NEXT: smov w15, v31.h[3]
+; CHECK-GI-NEXT: mov v11.h[7], w12
+; CHECK-GI-NEXT: mov v29.h[7], w13
; CHECK-GI-NEXT: mov v6.s[1], wzr
+; CHECK-GI-NEXT: mul v13.8h, v10.8h, v14.8h
+; CHECK-GI-NEXT: smov w12, v12.h[0]
+; CHECK-GI-NEXT: smov w13, v12.h[1]
+; CHECK-GI-NEXT: mov v7.s[1], wzr
; CHECK-GI-NEXT: mov v2.s[1], wzr
-; CHECK-GI-NEXT: mov v5.s[1], wzr
; CHECK-GI-NEXT: mov v4.s[1], wzr
-; CHECK-GI-NEXT: mov v7.s[1], wzr
-; CHECK-GI-NEXT: mov v10.s[2], w12
-; CHECK-GI-NEXT: ldr w12, [sp, #1080]
-; CHECK-GI-NEXT: mov v8.s[1], wzr
-; CHECK-GI-NEXT: mov v9.s[3], w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: ldr w10, [sp, #1032]
-; CHECK-GI-NEXT: sxtb w9, w12
-; CHECK-GI-NEXT: mov v29.s[1], w11
-; CHECK-GI-NEXT: ldr w11, [sp, #1072]
-; CHECK-GI-NEXT: mov v19.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
+; CHECK-GI-NEXT: fmov s31, w11
+; CHECK-GI-NEXT: mov v30.s[3], w14
+; CHECK-GI-NEXT: smov w11, v12.h[4]
+; CHECK-GI-NEXT: mul v14.8h, v11.8h, v29.8h
+; CHECK-GI-NEXT: fmov s29, w10
+; CHECK-GI-NEXT: smov w10, v15.h[2]
+; CHECK-GI-NEXT: smov w14, v13.h[0]
+; CHECK-GI-NEXT: fmov s8, w12
+; CHECK-GI-NEXT: smov w16, v13.h[1]
+; CHECK-GI-NEXT: mov v31.s[1], w9
+; CHECK-GI-NEXT: smov w9, v12.h[2]
+; CHECK-GI-NEXT: mov v28.s[3], w15
+; CHECK-GI-NEXT: mov v29.s[1], w8
+; CHECK-GI-NEXT: smov w8, v15.h[6]
+; CHECK-GI-NEXT: smov w15, v12.h[5]
+; CHECK-GI-NEXT: mov v8.s[1], w13
+; CHECK-GI-NEXT: fmov s9, w11
+; CHECK-GI-NEXT: smov w12, v15.h[3]
+; CHECK-GI-NEXT: fmov s10, w14
+; CHECK-GI-NEXT: smov w14, v13.h[2]
+; CHECK-GI-NEXT: smov w11, v12.h[6]
+; CHECK-GI-NEXT: smov w13, v15.h[7]
+; CHECK-GI-NEXT: mov v3.s[1], wzr
+; CHECK-GI-NEXT: mov v5.s[1], wzr
+; CHECK-GI-NEXT: mov v31.s[2], w8
+; CHECK-GI-NEXT: smov w8, v13.h[4]
+; CHECK-GI-NEXT: mov v29.s[2], w10
+; CHECK-GI-NEXT: mov v10.s[1], w16
+; CHECK-GI-NEXT: smov w16, v14.h[0]
+; CHECK-GI-NEXT: mov v8.s[2], w9
+; CHECK-GI-NEXT: smov w9, v13.h[5]
+; CHECK-GI-NEXT: smov w10, v12.h[3]
+; CHECK-GI-NEXT: mov v9.s[1], w15
+; CHECK-GI-NEXT: smov w15, v13.h[6]
; CHECK-GI-NEXT: mov v1.s[1], wzr
-; CHECK-GI-NEXT: mul w8, w8, w9
+; CHECK-GI-NEXT: mov v0.s[1], wzr
+; CHECK-GI-NEXT: fmov s11, w8
+; CHECK-GI-NEXT: smov w8, v14.h[1]
+; CHECK-GI-NEXT: mov v29.s[3], w12
+; CHECK-GI-NEXT: mov v10.s[2], w14
+; CHECK-GI-NEXT: smov w14, v12.h[7]
+; CHECK-GI-NEXT: fmov s12, w16
+; CHECK-GI-NEXT: smov w12, v14.h[4]
+; CHECK-GI-NEXT: mov v8.s[3], w10
+; CHECK-GI-NEXT: ldr w10, [sp, #536]
+; CHECK-GI-NEXT: mov v11.s[1], w9
+; CHECK-GI-NEXT: ldr w9, [sp, #272]
+; CHECK-GI-NEXT: mov v9.s[2], w11
+; CHECK-GI-NEXT: ldr w11, [sp, #800]
+; CHECK-GI-NEXT: mov v12.s[1], w8
+; CHECK-GI-NEXT: ldr w8, [sp, #1064]
+; CHECK-GI-NEXT: mov v31.s[3], w13
+; CHECK-GI-NEXT: smov w13, v14.h[5]
+; CHECK-GI-NEXT: sxtb w9, w9
; CHECK-GI-NEXT: sxtb w10, w10
; CHECK-GI-NEXT: sxtb w11, w11
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: mov v11.s[2], w15
+; CHECK-GI-NEXT: smov w15, v13.h[3]
+; CHECK-GI-NEXT: smov w16, v13.h[7]
+; CHECK-GI-NEXT: fmov s13, w12
+; CHECK-GI-NEXT: mul w9, w9, w10
+; CHECK-GI-NEXT: smov w12, v14.h[2]
+; CHECK-GI-NEXT: mul w8, w11, w8
+; CHECK-GI-NEXT: mov v19.s[2], wzr
+; CHECK-GI-NEXT: mov v21.s[2], wzr
; CHECK-GI-NEXT: mov v16.s[2], wzr
+; CHECK-GI-NEXT: mov v18.s[2], wzr
; CHECK-GI-NEXT: mov v17.s[2], wzr
-; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v13.s[1], w13
+; CHECK-GI-NEXT: smov w13, v14.h[6]
+; CHECK-GI-NEXT: sxth w9, w9
+; CHECK-GI-NEXT: sxth w10, w8
+; CHECK-GI-NEXT: mov v20.s[2], wzr
; CHECK-GI-NEXT: mov v6.s[2], wzr
+; CHECK-GI-NEXT: mov v7.s[2], wzr
; CHECK-GI-NEXT: mov v2.s[2], wzr
-; CHECK-GI-NEXT: mov v5.s[2], wzr
; CHECK-GI-NEXT: mov v4.s[2], wzr
-; CHECK-GI-NEXT: mov v7.s[2], wzr
-; CHECK-GI-NEXT: mov v8.s[2], wzr
-; CHECK-GI-NEXT: mov v29.s[2], w10
-; CHECK-GI-NEXT: mov v10.s[3], w11
-; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
-; CHECK-GI-NEXT: ldr w9, [sp, #976]
-; CHECK-GI-NEXT: fmov s21, w8
-; CHECK-GI-NEXT: ldr w8, [sp, #1040]
+; CHECK-GI-NEXT: mov v3.s[2], wzr
+; CHECK-GI-NEXT: mov v5.s[2], wzr
+; CHECK-GI-NEXT: add v22.4s, v22.4s, v23.4s
+; CHECK-GI-NEXT: add v25.4s, v24.4s, v25.4s
+; CHECK-GI-NEXT: fmov s23, w9
+; CHECK-GI-NEXT: fmov s24, w10
+; CHECK-GI-NEXT: mov v12.s[2], w12
+; CHECK-GI-NEXT: mov v13.s[2], w13
+; CHECK-GI-NEXT: smov w8, v14.h[3]
+; CHECK-GI-NEXT: smov w9, v14.h[7]
; CHECK-GI-NEXT: mov v1.s[2], wzr
+; CHECK-GI-NEXT: mov v0.s[2], wzr
+; CHECK-GI-NEXT: mov v19.s[3], wzr
+; CHECK-GI-NEXT: mov v21.s[3], wzr
; CHECK-GI-NEXT: mov v16.s[3], wzr
+; CHECK-GI-NEXT: mov v18.s[3], wzr
; CHECK-GI-NEXT: mov v17.s[3], wzr
-; CHECK-GI-NEXT: sxtb w9, w9
-; CHECK-GI-NEXT: sxtb w8, w8
-; CHECK-GI-NEXT: mov v11.16b, v8.16b
-; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v20.s[3], wzr
; CHECK-GI-NEXT: mov v6.s[3], wzr
+; CHECK-GI-NEXT: mov v7.s[3], wzr
; CHECK-GI-NEXT: mov v2.s[3], wzr
-; CHECK-GI-NEXT: mov v5.s[3], wzr
; CHECK-GI-NEXT: mov v4.s[3], wzr
-; CHECK-GI-NEXT: mov v7.s[3], wzr
-; CHECK-GI-NEXT: mov v25.s[1], wzr
-; CHECK-GI-NEXT: mov v21.s[1], wzr
-; CHECK-GI-NEXT: mul v8.4s, v13.4s, v9.4s
-; CHECK-GI-NEXT: mul v9.4s, v14.4s, v10.4s
-; CHECK-GI-NEXT: mov v23.s[3], w9
-; CHECK-GI-NEXT: mov v29.s[3], w8
+; CHECK-GI-NEXT: mov v3.s[3], wzr
+; CHECK-GI-NEXT: mov v5.s[3], wzr
+; CHECK-GI-NEXT: mov v23.s[1], wzr
+; CHECK-GI-NEXT: mov v24.s[1], wzr
+; CHECK-GI-NEXT: mov v9.s[3], w14
+; CHECK-GI-NEXT: mov v10.s[3], w15
+; CHECK-GI-NEXT: mov v11.s[3], w16
; CHECK-GI-NEXT: mov v1.s[3], wzr
-; CHECK-GI-NEXT: mov v11.s[3], wzr
-; CHECK-GI-NEXT: add v16.4s, v16.4s, v17.4s
-; CHECK-GI-NEXT: add v3.4s, v3.4s, v6.4s
-; CHECK-GI-NEXT: add v2.4s, v2.4s, v5.4s
-; CHECK-GI-NEXT: add v4.4s, v4.4s, v7.4s
-; CHECK-GI-NEXT: mov v25.s[2], wzr
-; CHECK-GI-NEXT: mov v21.s[2], wzr
-; CHECK-GI-NEXT: mla v20.4s, v28.4s, v22.4s
-; CHECK-GI-NEXT: mla v8.4s, v31.4s, v23.4s
-; CHECK-GI-NEXT: mla v9.4s, v12.4s, v29.4s
-; CHECK-GI-NEXT: add v5.4s, v19.4s, v16.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v18.4s
-; CHECK-GI-NEXT: add v3.4s, v11.4s, v3.4s
+; CHECK-GI-NEXT: mov v12.s[3], w8
+; CHECK-GI-NEXT: mov v13.s[3], w9
+; CHECK-GI-NEXT: mov v0.s[3], wzr
+; CHECK-GI-NEXT: add v19.4s, v19.4s, v21.4s
+; CHECK-GI-NEXT: add v16.4s, v16.4s, v18.4s
+; CHECK-GI-NEXT: add v17.4s, v17.4s, v20.4s
+; CHECK-GI-NEXT: add v6.4s, v6.4s, v7.4s
; CHECK-GI-NEXT: add v2.4s, v2.4s, v4.4s
-; CHECK-GI-NEXT: add v4.4s, v27.4s, v30.4s
-; CHECK-GI-NEXT: add v6.4s, v24.4s, v26.4s
-; CHECK-GI-NEXT: ldr x29, [sp, #80] // 8-byte Folded Reload
-; CHECK-GI-NEXT: mov v25.s[3], wzr
-; CHECK-GI-NEXT: mov v21.s[3], wzr
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v20.4s
-; CHECK-GI-NEXT: add v1.4s, v1.4s, v5.4s
-; CHECK-GI-NEXT: add v5.4s, v8.4s, v9.4s
-; CHECK-GI-NEXT: add v2.4s, v3.4s, v2.4s
-; CHECK-GI-NEXT: add v3.4s, v4.4s, v6.4s
-; CHECK-GI-NEXT: ldp d9, d8, [sp, #64] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d11, d10, [sp, #48] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v25.4s, v1.4s
-; CHECK-GI-NEXT: add v0.4s, v0.4s, v5.4s
-; CHECK-GI-NEXT: add v2.4s, v21.4s, v2.4s
-; CHECK-GI-NEXT: ldp d13, d12, [sp, #32] // 16-byte Folded Reload
-; CHECK-GI-NEXT: ldp d15, d14, [sp, #16] // 16-byte Folded Reload
-; CHECK-GI-NEXT: add v1.4s, v3.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v3.4s, v5.4s
+; CHECK-GI-NEXT: mov v23.s[2], wzr
+; CHECK-GI-NEXT: mov v24.s[2], wzr
+; CHECK-GI-NEXT: add v26.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v27.4s, v28.4s, v30.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v19.4s
+; CHECK-GI-NEXT: add v4.4s, v16.4s, v17.4s
+; CHECK-GI-NEXT: add v5.4s, v29.4s, v31.4s
+; CHECK-GI-NEXT: add v7.4s, v8.4s, v9.4s
+; CHECK-GI-NEXT: add v16.4s, v10.4s, v11.4s
+; CHECK-GI-NEXT: add v17.4s, v12.4s, v13.4s
+; CHECK-GI-NEXT: add v0.4s, v0.4s, v6.4s
+; CHECK-GI-NEXT: add v2.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: mov v23.s[3], wzr
+; CHECK-GI-NEXT: mov v24.s[3], wzr
+; CHECK-GI-NEXT: add v3.4s, v22.4s, v25.4s
+; CHECK-GI-NEXT: add v6.4s, v26.4s, v27.4s
+; CHECK-GI-NEXT: add v1.4s, v1.4s, v4.4s
+; CHECK-GI-NEXT: add v4.4s, v5.4s, v7.4s
+; CHECK-GI-NEXT: add v5.4s, v16.4s, v17.4s
; CHECK-GI-NEXT: add v0.4s, v0.4s, v2.4s
+; CHECK-GI-NEXT: ldr x29, [sp, #64] // 8-byte Folded Reload
+; CHECK-GI-NEXT: ldp d9, d8, [sp, #48] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v2.4s, v3.4s, v6.4s
+; CHECK-GI-NEXT: add v1.4s, v23.4s, v1.4s
+; CHECK-GI-NEXT: add v3.4s, v4.4s, v5.4s
+; CHECK-GI-NEXT: add v0.4s, v24.4s, v0.4s
+; CHECK-GI-NEXT: ldp d11, d10, [sp, #32] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v1.4s, v2.4s, v1.4s
+; CHECK-GI-NEXT: ldp d13, d12, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: add v0.4s, v3.4s, v0.4s
; CHECK-GI-NEXT: addv s1, v1.4s
; CHECK-GI-NEXT: addv s0, v0.4s
; CHECK-GI-NEXT: fmov w8, s1
; CHECK-GI-NEXT: fmov w9, s0
; CHECK-GI-NEXT: add w0, w8, w9
-; CHECK-GI-NEXT: add sp, sp, #96
+; CHECK-GI-NEXT: ldp d15, d14, [sp], #80 // 16-byte Folded Reload
; CHECK-GI-NEXT: ret
entry:
%az = sext <33 x i8> %a to <33 x i32>
diff --git a/llvm/test/CodeGen/AArch64/neon-extmul.ll b/llvm/test/CodeGen/AArch64/neon-extmul.ll
index c82f8e1..84b634d 100644
--- a/llvm/test/CodeGen/AArch64/neon-extmul.ll
+++ b/llvm/test/CodeGen/AArch64/neon-extmul.ll
@@ -12,10 +12,9 @@ define <8 x i32> @extmuls_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: smull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -34,10 +33,9 @@ define <8 x i32> @extmulu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v0.4s, v2.4h, v1.4h
-; CHECK-GI-NEXT: umull2 v1.4s, v2.8h, v1.8h
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v0.4s, v1.4h, #0
+; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -79,12 +77,9 @@ define <8 x i32> @extmuladds_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: smlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: smlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: smull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: saddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: saddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i32>
@@ -104,12 +99,9 @@ define <8 x i32> @extmuladdu_v8i8_i32(<8 x i8> %s0, <8 x i8> %s1, <8 x i32> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i32:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umlal v2.4s, v0.4h, v1.4h
-; CHECK-GI-NEXT: umlal2 v3.4s, v0.8h, v1.8h
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
+; CHECK-GI-NEXT: umull v1.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: uaddw v0.4s, v2.4s, v1.4h
+; CHECK-GI-NEXT: uaddw2 v1.4s, v3.4s, v1.8h
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i32>
@@ -163,16 +155,13 @@ define <8 x i64> @extmuls_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmuls_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: smull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: smull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: smull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: smull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: sshll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: sshll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: sshll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: sshll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -195,16 +184,13 @@ define <8 x i64> @extmulu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1) {
;
; CHECK-GI-LABEL: extmulu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v2.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v3.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v4.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v5.4s, v1.8h, #0
-; CHECK-GI-NEXT: umull v0.2d, v2.2s, v3.2s
-; CHECK-GI-NEXT: umull2 v1.2d, v2.4s, v3.4s
-; CHECK-GI-NEXT: umull v2.2d, v4.2s, v5.2s
-; CHECK-GI-NEXT: umull2 v3.2d, v4.4s, v5.4s
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v3.4s, v0.8h, #0
+; CHECK-GI-NEXT: ushll v0.2d, v1.2s, #0
+; CHECK-GI-NEXT: ushll2 v1.2d, v1.4s, #0
+; CHECK-GI-NEXT: ushll v2.2d, v3.2s, #0
+; CHECK-GI-NEXT: ushll2 v3.2d, v3.4s, #0
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
@@ -263,20 +249,13 @@ define <8 x i64> @extmuladds_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladds_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: sshll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: sshll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: sshll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: sshll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: smlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: smlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: smlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: smlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: smull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: sshll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: sshll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: saddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: saddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: saddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: saddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = sext <8 x i8> %s0 to <8 x i64>
@@ -301,20 +280,13 @@ define <8 x i64> @extmuladdu_v8i8_i64(<8 x i8> %s0, <8 x i8> %s1, <8 x i64> %b)
;
; CHECK-GI-LABEL: extmuladdu_v8i8_i64:
; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: ushll v6.4s, v0.4h, #0
-; CHECK-GI-NEXT: ushll v7.4s, v1.4h, #0
-; CHECK-GI-NEXT: ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT: ushll2 v1.4s, v1.8h, #0
-; CHECK-GI-NEXT: umlal v2.2d, v6.2s, v7.2s
-; CHECK-GI-NEXT: umlal2 v3.2d, v6.4s, v7.4s
-; CHECK-GI-NEXT: umlal v4.2d, v0.2s, v1.2s
-; CHECK-GI-NEXT: umlal2 v5.2d, v0.4s, v1.4s
-; CHECK-GI-NEXT: mov v0.16b, v2.16b
-; CHECK-GI-NEXT: mov v1.16b, v3.16b
-; CHECK-GI-NEXT: mov v2.16b, v4.16b
-; CHECK-GI-NEXT: mov v3.16b, v5.16b
+; CHECK-GI-NEXT: umull v0.8h, v0.8b, v1.8b
+; CHECK-GI-NEXT: ushll v1.4s, v0.4h, #0
+; CHECK-GI-NEXT: ushll2 v6.4s, v0.8h, #0
+; CHECK-GI-NEXT: uaddw v0.2d, v2.2d, v1.2s
+; CHECK-GI-NEXT: uaddw2 v1.2d, v3.2d, v1.4s
+; CHECK-GI-NEXT: uaddw v2.2d, v4.2d, v6.2s
+; CHECK-GI-NEXT: uaddw2 v3.2d, v5.2d, v6.4s
; CHECK-GI-NEXT: ret
entry:
%s0s = zext <8 x i8> %s0 to <8 x i64>
diff --git a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
index 17ad298..3caac1d 100644
--- a/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
+++ b/llvm/test/CodeGen/AArch64/peephole-and-tst.ll
@@ -1,40 +1,72 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
%struct.anon = type { ptr, ptr }
@ptr_wrapper = common global ptr null, align 8
define i32 @test_func_i32_two_uses(i32 %in, i32 %bit, i32 %mask) {
-; CHECK-LABEL: test_func_i32_two_uses:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB0_3
-; CHECK-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: lsl w1, w1, #1
-; CHECK-NEXT: cbz w1, .LBB0_6
-; CHECK-NEXT: .LBB0_3: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands w10, w1, w0
-; CHECK-NEXT: and w11, w2, w0
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cmp w10, w11
-; CHECK-NEXT: b.eq .LBB0_1
-; CHECK-NEXT: // %bb.4: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbnz w2, .LBB0_1
-; CHECK-NEXT: // %bb.5: // %do.body
-; CHECK-NEXT: // in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: cbz w10, .LBB0_2
-; CHECK-NEXT: b .LBB0_1
-; CHECK-NEXT: .LBB0_6: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i32_two_uses:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB0_3
+; CHECK-SD-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: lsl w1, w1, #1
+; CHECK-SD-NEXT: cbz w1, .LBB0_6
+; CHECK-SD-NEXT: .LBB0_3: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands w10, w1, w0
+; CHECK-SD-NEXT: and w11, w2, w0
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cmp w10, w11
+; CHECK-SD-NEXT: b.eq .LBB0_1
+; CHECK-SD-NEXT: // %bb.4: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbnz w2, .LBB0_1
+; CHECK-SD-NEXT: // %bb.5: // %do.body
+; CHECK-SD-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-SD-NEXT: cbz w10, .LBB0_2
+; CHECK-SD-NEXT: b .LBB0_1
+; CHECK-SD-NEXT: .LBB0_6: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i32_two_uses:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB0_3
+; CHECK-GI-NEXT: .LBB0_1: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: .LBB0_2: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: lsl w1, w1, #1
+; CHECK-GI-NEXT: cbz w1, .LBB0_6
+; CHECK-GI-NEXT: .LBB0_3: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and w10, w1, w0
+; CHECK-GI-NEXT: tst w1, w0
+; CHECK-GI-NEXT: and w11, w2, w0
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cmp w10, w11
+; CHECK-GI-NEXT: b.eq .LBB0_1
+; CHECK-GI-NEXT: // %bb.4: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbnz w2, .LBB0_1
+; CHECK-GI-NEXT: // %bb.5: // %do.body
+; CHECK-GI-NEXT: // in Loop: Header=BB0_3 Depth=1
+; CHECK-GI-NEXT: cbz w10, .LBB0_2
+; CHECK-GI-NEXT: b .LBB0_1
+; CHECK-GI-NEXT: .LBB0_6: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -70,28 +102,52 @@ do.end: ; preds = %4
}
define i32 @test_func_i64_one_use(i64 %in, i64 %bit, i64 %mask) {
-; CHECK-LABEL: test_func_i64_one_use:
-; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: adrp x8, :got:ptr_wrapper
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
-; CHECK-NEXT: ldr x9, [x8]
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: lsl x1, x1, #1
-; CHECK-NEXT: cbz x1, .LBB1_4
-; CHECK-NEXT: .LBB1_2: // %do.body
-; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: ands x10, x1, x0
-; CHECK-NEXT: orr x10, x2, x10
-; CHECK-NEXT: cinc w8, w8, ne
-; CHECK-NEXT: cbz x10, .LBB1_1
-; CHECK-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: str xzr, [x9, #8]
-; CHECK-NEXT: b .LBB1_1
-; CHECK-NEXT: .LBB1_4: // %do.end
-; CHECK-NEXT: mov w0, w8
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_func_i64_one_use:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-SD-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-SD-NEXT: ldr x9, [x8]
+; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: b .LBB1_2
+; CHECK-SD-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: lsl x1, x1, #1
+; CHECK-SD-NEXT: cbz x1, .LBB1_4
+; CHECK-SD-NEXT: .LBB1_2: // %do.body
+; CHECK-SD-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-SD-NEXT: ands x10, x1, x0
+; CHECK-SD-NEXT: orr x10, x2, x10
+; CHECK-SD-NEXT: cinc w8, w8, ne
+; CHECK-SD-NEXT: cbz x10, .LBB1_1
+; CHECK-SD-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-SD-NEXT: str xzr, [x9, #8]
+; CHECK-SD-NEXT: b .LBB1_1
+; CHECK-SD-NEXT: .LBB1_4: // %do.end
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_func_i64_one_use:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: adrp x8, :got:ptr_wrapper
+; CHECK-GI-NEXT: ldr x8, [x8, :got_lo12:ptr_wrapper]
+; CHECK-GI-NEXT: ldr x9, [x8]
+; CHECK-GI-NEXT: mov w8, wzr
+; CHECK-GI-NEXT: b .LBB1_2
+; CHECK-GI-NEXT: .LBB1_1: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: lsl x1, x1, #1
+; CHECK-GI-NEXT: cbz x1, .LBB1_4
+; CHECK-GI-NEXT: .LBB1_2: // %do.body
+; CHECK-GI-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-GI-NEXT: and x10, x1, x0
+; CHECK-GI-NEXT: tst x1, x0
+; CHECK-GI-NEXT: orr x10, x2, x10
+; CHECK-GI-NEXT: cinc w8, w8, ne
+; CHECK-GI-NEXT: cbz x10, .LBB1_1
+; CHECK-GI-NEXT: // %bb.3: // in Loop: Header=BB1_2 Depth=1
+; CHECK-GI-NEXT: str xzr, [x9, #8]
+; CHECK-GI-NEXT: b .LBB1_1
+; CHECK-GI-NEXT: .LBB1_4: // %do.end
+; CHECK-GI-NEXT: mov w0, w8
+; CHECK-GI-NEXT: ret
entry:
%0 = load ptr, ptr @ptr_wrapper, align 8
%result = getelementptr inbounds %struct.anon, ptr %0, i64 0, i32 1
@@ -124,11 +180,18 @@ do.end: ; preds = %4
}
define i64 @test_and1(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and1:
-; CHECK: // %bb.0:
-; CHECK-NEXT: ands x8, x0, #0x3
-; CHECK-NEXT: csel x0, x8, x1, eq
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: ands x8, x0, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x1, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and x8, x0, #0x3
+; CHECK-GI-NEXT: tst x0, #0x3
+; CHECK-GI-NEXT: csel x0, x8, x1, eq
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%c = icmp eq i64 %a, 0
%s = select i1 %c, i64 %a, i64 %y
@@ -148,23 +211,43 @@ define i64 @test_and2(i64 %x, i64 %y) {
}
define i64 @test_and3(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and3:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: mov x20, x0
-; CHECK-NEXT: mov x0, xzr
-; CHECK-NEXT: mov x19, x1
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x20, #0x3
-; CHECK-NEXT: csel x0, x8, x19, eq
-; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 32
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w20, -16
+; CHECK-SD-NEXT: .cfi_offset w30, -32
+; CHECK-SD-NEXT: mov x20, x0
+; CHECK-SD-NEXT: mov x0, xzr
+; CHECK-SD-NEXT: mov x19, x1
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x20, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x19, eq
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-SD-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w21, -24
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: and x21, x0, #0x3
+; CHECK-GI-NEXT: mov x0, xzr
+; CHECK-GI-NEXT: mov x20, x1
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x21, x20, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 0)
%c = icmp eq i64 %a, 0
@@ -173,19 +256,37 @@ define i64 @test_and3(i64 %x, i64 %y) {
}
define i64 @test_and_4(i64 %x, i64 %y) {
-; CHECK-LABEL: test_and_4:
-; CHECK: // %bb.0:
-; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: ands x0, x0, #0x3
-; CHECK-NEXT: bl callee
-; CHECK-NEXT: ands x8, x19, #0x3
-; CHECK-NEXT: csel x0, x8, x0, eq
-; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT: ret
+; CHECK-SD-LABEL: test_and_4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: mov x19, x0
+; CHECK-SD-NEXT: ands x0, x0, #0x3
+; CHECK-SD-NEXT: bl callee
+; CHECK-SD-NEXT: ands x8, x19, #0x3
+; CHECK-SD-NEXT: csel x0, x8, x0, eq
+; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_and_4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: and x20, x0, #0x3
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: mov x0, x20
+; CHECK-GI-NEXT: bl callee
+; CHECK-GI-NEXT: tst x19, #0x3
+; CHECK-GI-NEXT: csel x0, x20, x0, eq
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
%a = and i64 %x, 3
%b = call i64 @callee(i64 %a)
%c = icmp eq i64 %a, 0
diff --git a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
index 52cb2d9..c7fb2db 100644
--- a/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vector-interleave.ll
@@ -267,7 +267,7 @@ define <vscale x 32 x i16> @interleave4_nxv8i16(<vscale x 8 x i16> %vec0, <vscal
; SME2-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; SME2-NEXT: zip { z0.h - z3.h }, { z0.h - z3.h }
; SME2-NEXT: ret
- %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3)
+ %retval = call <vscale x 32 x i16> @llvm.vector.interleave4.nxv32i16(<vscale x 8 x i16> %vec0, <vscale x 8 x i16> %vec1, <vscale x 8 x i16> %vec2, <vscale x 8 x i16> %vec3)
ret <vscale x 32 x i16> %retval
}
@@ -540,30 +540,81 @@ define <vscale x 4 x i32> @interleave2_nxv2i32(<vscale x 2 x i32> %vec0, <vscale
ret <vscale x 4 x i32> %retval
}
-; Float declarations
-declare <vscale x 4 x half> @llvm.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>)
-declare <vscale x 8 x half> @llvm.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>)
-declare <vscale x 16 x half> @llvm.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
-declare <vscale x 4 x float> @llvm.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>)
-declare <vscale x 8 x float> @llvm.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
-declare <vscale x 4 x double> @llvm.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
+define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() {
+; CHECK-LABEL: interleave2_same_const_splat_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, #3 // =0x3
+; CHECK-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() {
+; SVE-LABEL: interleave2_diff_const_splat_nxv4i16:
+; SVE: // %bb.0:
+; SVE-NEXT: mov z0.d, #4 // =0x4
+; SVE-NEXT: mov z1.d, #3 // =0x3
+; SVE-NEXT: zip2 z2.d, z1.d, z0.d
+; SVE-NEXT: zip1 z0.d, z1.d, z0.d
+; SVE-NEXT: uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT: ret
+;
+; SME2-LABEL: interleave2_diff_const_splat_nxv4i16:
+; SME2: // %bb.0:
+; SME2-NEXT: mov z0.d, #4 // =0x4
+; SME2-NEXT: mov z1.d, #3 // =0x3
+; SME2-NEXT: zip { z0.d, z1.d }, z1.d, z0.d
+; SME2-NEXT: uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT: ret
+  %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 4))
+ ret <vscale x 4 x i16> %retval
+}
-; Integer declarations
-declare <vscale x 32 x i8> @llvm.vector.interleave2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
-declare <vscale x 16 x i16> @llvm.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
-declare <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
-declare <vscale x 4 x i64> @llvm.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) {
+; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.s, w0
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat, <vscale x 2 x i16> %splat)
+ ret <vscale x 4 x i16> %retval
+}
-; Predicated
-declare <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
-declare <vscale x 16 x i1> @llvm.vector.interleave2.nxv16i1(<vscale x 8 x i1>, <vscale x 8 x i1>)
-declare <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
-declare <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
-
-; Illegal type size
-declare <vscale x 16 x i32> @llvm.vector.interleave2.nxv16i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
-declare <vscale x 8 x i64> @llvm.vector.interleave2.nxv8i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
-
-declare <vscale x 16 x i8> @llvm.vector.interleave2.nxv16i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
-declare <vscale x 8 x i16> @llvm.vector.interleave2.nxv8i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
-declare <vscale x 4 x i32> @llvm.vector.interleave2.nxv4i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
+define <vscale x 4 x i16> @interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) {
+; SVE-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; SVE: // %bb.0:
+; SVE-NEXT: // kill: def $w1 killed $w1 def $x1
+; SVE-NEXT: // kill: def $w0 killed $w0 def $x0
+; SVE-NEXT: mov z0.d, x0
+; SVE-NEXT: mov z1.d, x1
+; SVE-NEXT: zip2 z2.d, z0.d, z1.d
+; SVE-NEXT: zip1 z0.d, z0.d, z1.d
+; SVE-NEXT: uzp1 z0.s, z0.s, z2.s
+; SVE-NEXT: ret
+;
+; SME2-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; SME2: // %bb.0:
+; SME2-NEXT: // kill: def $w1 killed $w1 def $x1
+; SME2-NEXT: // kill: def $w0 killed $w0 def $x0
+; SME2-NEXT: mov z0.d, x0
+; SME2-NEXT: mov z1.d, x1
+; SME2-NEXT: zip { z0.d, z1.d }, z0.d, z1.d
+; SME2-NEXT: uzp1 z0.s, z0.s, z1.s
+; SME2-NEXT: ret
+ %ins1 = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <vscale x 2 x i16> %ins1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %ins2 = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <vscale x 2 x i16> %ins2, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat1, <vscale x 2 x i16> %splat2)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() {
+; CHECK-LABEL: interleave4_same_const_splat_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.h, #3 // =0x3
+; CHECK-NEXT: ret
+ %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
index 9306c20..7dcd56c 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-combine.ll
@@ -1,14 +1,14 @@
-; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s |FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mattr=+sve < %s | FileCheck %s
-declare i32 @llvm.vscale.i32()
-declare i64 @llvm.vscale.i64()
+target triple = "aarch64-unknown-linux-gnu"
; Fold (add (vscale * C0), (vscale * C1)) to (vscale * (C0 + C1)).
define i64 @combine_add_vscale_i64() nounwind {
; CHECK-LABEL: combine_add_vscale_i64:
-; CHECK-NOT: add
-; CHECK-NEXT: cntd x0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x0
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%add = add i64 %vscale, %vscale
ret i64 %add
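For reference, a minimal sketch of the same fold with non-trivial constants (C0 = 2, C1 = 4). This is illustrative only: the function name is made up, and it assumes the same RUN line (llc -mattr=+sve) and target triple as this file.

declare i64 @llvm.vscale.i64()

define i64 @sketch_add_vscale_c2_c4() nounwind {
  %vscale = call i64 @llvm.vscale.i64()
  %lhs = mul i64 %vscale, 2    ; vscale * C0
  %rhs = mul i64 %vscale, 4    ; vscale * C1
  %add = add i64 %lhs, %rhs    ; folds to vscale * (C0 + C1) = vscale * 6
  ret i64 %add
}

After the fold only one vscale read should survive, which the backend can emit as a single counting instruction (e.g. a cntd with a mul immediate).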
@@ -16,9 +16,10 @@ define i64 @combine_add_vscale_i64() nounwind {
define i32 @combine_add_vscale_i32() nounwind {
; CHECK-LABEL: combine_add_vscale_i32:
-; CHECK-NOT: add
-; CHECK-NEXT: cntd x0
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: cntd x0
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%add = add i32 %vscale, %vscale
ret i32 %add
@@ -28,9 +29,9 @@ define i32 @combine_add_vscale_i32() nounwind {
; In this test, C0 = 1, C1 = 32.
define i64 @combine_mul_vscale_i64() nounwind {
; CHECK-LABEL: combine_mul_vscale_i64:
-; CHECK-NOT: mul
-; CHECK-NEXT: rdvl x0, #2
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #2
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%mul = mul i64 %vscale, 32
ret i64 %mul
@@ -38,9 +39,10 @@ define i64 @combine_mul_vscale_i64() nounwind {
define i32 @combine_mul_vscale_i32() nounwind {
; CHECK-LABEL: combine_mul_vscale_i32:
-; CHECK-NOT: mul
-; CHECK-NEXT: rdvl x0, #3
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #3
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%mul = mul i32 %vscale, 48
ret i32 %mul
@@ -49,11 +51,11 @@ define i32 @combine_mul_vscale_i32() nounwind {
; Canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C))
define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i64:
-; CHECK-NOT: sub
-; CHECK-NEXT: rdvl x8, #-1
-; CHECK-NEXT: asr x8, x8, #4
-; CHECK-NEXT: add x0, x0, x8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #-1
+; CHECK-NEXT: asr x8, x8, #4
+; CHECK-NEXT: add x0, x0, x8
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%sub = sub i64 %in, %vscale
ret i64 %sub
@@ -61,11 +63,11 @@ define i64 @combine_sub_vscale_i64(i64 %in) nounwind {
define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
; CHECK-LABEL: combine_sub_vscale_i32:
-; CHECK-NOT: sub
-; CHECK-NEXT: rdvl x8, #-1
-; CHECK-NEXT: asr x8, x8, #4
-; CHECK-NEXT: add w0, w0, w8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #-1
+; CHECK-NEXT: asr x8, x8, #4
+; CHECK-NEXT: add w0, w0, w8
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%sub = sub i32 %in, %vscale
ret i32 %sub
@@ -75,12 +77,13 @@ define i32 @combine_sub_vscale_i32(i32 %in) nounwind {
; (sub X, (vscale * C)) to (add X, (vscale * -C))
define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: multiple_uses_sub_vscale_i64:
-; CHECK-NEXT: rdvl x8, #1
-; CHECK-NEXT: lsr x8, x8, #4
-; CHECK-NEXT: sub x9, x0, x8
-; CHECK-NEXT: add x8, x1, x8
-; CHECK-NEXT: mul x0, x9, x8
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x8, #1
+; CHECK-NEXT: lsr x8, x8, #4
+; CHECK-NEXT: sub x9, x0, x8
+; CHECK-NEXT: add x8, x1, x8
+; CHECK-NEXT: mul x0, x9, x8
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%sub = sub i64 %x, %vscale
%add = add i64 %y, %vscale
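A short annotation of the CHECK lines above, for reference:

  rdvl x8, #1 ; lsr x8, x8, #4   => x8 = vscale, materialized once
  sub x9, x0, x8                 => %sub = %x - vscale
  add x8, x1, x8                 => %add = %y + vscale, reusing x8

Canonicalizing the sub to add %x, (vscale * -1) here would force a second, negated materialization alongside the positive one, so the combine is skipped.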
@@ -95,9 +98,9 @@ define i64 @multiple_uses_sub_vscale_i64(i64 %x, i64 %y) nounwind {
; Hence, the immediate for RDVL is #1.
define i64 @combine_shl_vscale_i64() nounwind {
; CHECK-LABEL: combine_shl_vscale_i64:
-; CHECK-NOT: shl
-; CHECK-NEXT: rdvl x0, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #1
+; CHECK-NEXT: ret
%vscale = call i64 @llvm.vscale.i64()
%shl = shl i64 %vscale, 4
ret i64 %shl
@@ -105,10 +108,38 @@ define i64 @combine_shl_vscale_i64() nounwind {
define i32 @combine_shl_vscale_i32() nounwind {
; CHECK-LABEL: combine_shl_vscale_i32:
-; CHECK-NOT: shl
-; CHECK-NEXT: rdvl x0, #1
-; CHECK-NEXT: ret
+; CHECK: // %bb.0:
+; CHECK-NEXT: rdvl x0, #1
+; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT: ret
%vscale = call i32 @llvm.vscale.i32()
%shl = shl i32 %vscale, 4
ret i32 %shl
}
+
+define i64 @combine_shl_mul_vscale(i64 %a) nounwind {
+; CHECK-LABEL: combine_shl_mul_vscale:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cnth x8
+; CHECK-NEXT: mul x0, x0, x8
+; CHECK-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale.i64()
+ %mul = mul i64 %a, %vscale
+ %shl = shl i64 %mul, 3
+ ret i64 %shl
+}
+
+define i64 @combine_shl_mul_vscale_commuted(i64 %a) nounwind {
+; CHECK-LABEL: combine_shl_mul_vscale_commuted:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cnth x8
+; CHECK-NEXT: mul x0, x0, x8
+; CHECK-NEXT: ret
+ %vscale = tail call i64 @llvm.vscale.i64()
+ %mul = mul i64 %vscale, %a
+ %shl = shl i64 %mul, 3
+ ret i64 %shl
+}
+
+declare i32 @llvm.vscale.i32()
+declare i64 @llvm.vscale.i64()
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 223698b..5fc996a 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -64,7 +64,8 @@ define void @loop1(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
; CHECK-NEXT: fcsel s2, s0, s3, mi
; CHECK-NEXT: subs w10, w10, #1
; CHECK-NEXT: fcvtzs s2, s2
-; CHECK-NEXT: st1 { v2.b }[0], [x9], #1
+; CHECK-NEXT: fmov w11, s2
+; CHECK-NEXT: strb w11, [x9], #1
; CHECK-NEXT: b.ne .LBB0_7
; CHECK-NEXT: .LBB0_8: // %for.cond.cleanup
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-add.ll b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
index 290a473..74d1165 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-add.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-add.ll
@@ -1907,11 +1907,8 @@ define i32 @test_udot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -1952,17 +1949,13 @@ define i32 @test_udot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_udot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: ushll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: umull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: umlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: umull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v16i8:
@@ -2018,36 +2011,21 @@ define i32 @test_udot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: ushll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: umull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: umull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: umlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: umlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: umull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: uaddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v24i8:
@@ -2118,61 +2096,33 @@ define i32 @test_udot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_udot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: ushll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: ushll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: ushll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: ushll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: ushll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: ushll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: umull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: umull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: umull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: umull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: umull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: umull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: umlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: umlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: umlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: umlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: umlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: umlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: umull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: umull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: umull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: umull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: umull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: umull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: uaddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: uaddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: uaddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: uaddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: uaddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: uaddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_udot_v48i8:
@@ -2225,11 +2175,8 @@ define i32 @test_sdot_v8i8(<8 x i8> %a, <8 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v8i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v0.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: smull v2.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v2.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: addv s0, v2.4s
+; CHECK-GI-BASE-NEXT: smull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
; CHECK-GI-BASE-NEXT: fmov w0, s0
; CHECK-GI-BASE-NEXT: ret
;
@@ -2270,17 +2217,13 @@ define i32 @test_sdot_v16i8(<16 x i8> %a, <16 x i8> %b) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v16i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: sshll v2.8h, v0.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v0.8h, v0.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v1.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-GI-BASE-NEXT: smull v4.4s, v3.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v5.4s, v1.4h, v0.4h
-; CHECK-GI-BASE-NEXT: smlal2 v4.4s, v3.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v5.4s, v1.8h, v0.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v5.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: smull v2.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: saddlv s1, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v16i8:
@@ -2336,36 +2279,21 @@ define i32 @test_sdot_v24i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v24i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: ldr q2, [x0]
-; CHECK-GI-BASE-NEXT: ldr d3, [x0, #16]
-; CHECK-GI-BASE-NEXT: ldr q4, [x1]
-; CHECK-GI-BASE-NEXT: ldr d5, [x1, #16]
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v2.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v2.8h, v2.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v3.8h, v3.8b, #0
-; CHECK-GI-BASE-NEXT: sshll v7.8h, v4.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v4.8h, v4.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v5.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v16.4s, v7.4h, v6.4h
-; CHECK-GI-BASE-NEXT: smull v17.4s, v4.4h, v2.4h
-; CHECK-GI-BASE-NEXT: smull v18.4s, v5.4h, v3.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v16.4s, v7.8h, v6.8h
-; CHECK-GI-BASE-NEXT: smlal2 v17.4s, v4.8h, v2.8h
-; CHECK-GI-BASE-NEXT: smlal2 v18.4s, v5.8h, v3.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v16.4s, v17.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v18.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldr q0, [x0]
+; CHECK-GI-BASE-NEXT: ldr q1, [x1]
+; CHECK-GI-BASE-NEXT: ldr d2, [x0, #16]
+; CHECK-GI-BASE-NEXT: ldr d3, [x1, #16]
+; CHECK-GI-BASE-NEXT: smull v4.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v1.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v3.8b, v2.8b
+; CHECK-GI-BASE-NEXT: saddlv s2, v4.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w8, s2
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v24i8:
@@ -2436,61 +2364,33 @@ define i32 @test_sdot_v48i8(ptr %p1, ptr %p2) {
;
; CHECK-GI-BASE-LABEL: test_sdot_v48i8:
; CHECK-GI-BASE: // %bb.0: // %entry
-; CHECK-GI-BASE-NEXT: fmov s0, wzr
-; CHECK-GI-BASE-NEXT: fmov s2, wzr
-; CHECK-GI-BASE-NEXT: ldr q16, [x0, #32]
-; CHECK-GI-BASE-NEXT: fmov s1, wzr
-; CHECK-GI-BASE-NEXT: fmov s3, wzr
-; CHECK-GI-BASE-NEXT: ldr q19, [x1, #32]
-; CHECK-GI-BASE-NEXT: ldp q5, q7, [x1]
-; CHECK-GI-BASE-NEXT: sshll v23.8h, v16.8b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll v20.8h, v19.8b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[1], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[1], wzr
-; CHECK-GI-BASE-NEXT: sshll2 v19.8h, v19.16b, #0
-; CHECK-GI-BASE-NEXT: ldp q18, q17, [x0]
-; CHECK-GI-BASE-NEXT: sshll v4.8h, v5.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v5.8h, v5.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v6.8h, v7.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v7.8h, v7.16b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v16.8h, v16.16b, #0
-; CHECK-GI-BASE-NEXT: mov v0.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[2], wzr
-; CHECK-GI-BASE-NEXT: sshll v21.8h, v18.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v18.8h, v18.16b, #0
-; CHECK-GI-BASE-NEXT: sshll v22.8h, v17.8b, #0
-; CHECK-GI-BASE-NEXT: sshll2 v17.8h, v17.16b, #0
-; CHECK-GI-BASE-NEXT: mov v1.s[2], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[2], wzr
-; CHECK-GI-BASE-NEXT: smull v28.4s, v20.4h, v23.4h
-; CHECK-GI-BASE-NEXT: smull v29.4s, v19.4h, v16.4h
-; CHECK-GI-BASE-NEXT: smull v24.4s, v4.4h, v21.4h
-; CHECK-GI-BASE-NEXT: smull v25.4s, v5.4h, v18.4h
-; CHECK-GI-BASE-NEXT: smull v26.4s, v6.4h, v22.4h
-; CHECK-GI-BASE-NEXT: smull v27.4s, v7.4h, v17.4h
-; CHECK-GI-BASE-NEXT: mov v0.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v2.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v1.s[3], wzr
-; CHECK-GI-BASE-NEXT: mov v3.s[3], wzr
-; CHECK-GI-BASE-NEXT: smlal2 v28.4s, v20.8h, v23.8h
-; CHECK-GI-BASE-NEXT: smlal2 v29.4s, v19.8h, v16.8h
-; CHECK-GI-BASE-NEXT: smlal2 v24.4s, v4.8h, v21.8h
-; CHECK-GI-BASE-NEXT: smlal2 v25.4s, v5.8h, v18.8h
-; CHECK-GI-BASE-NEXT: smlal2 v26.4s, v6.8h, v22.8h
-; CHECK-GI-BASE-NEXT: smlal2 v27.4s, v7.8h, v17.8h
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v2.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v1.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v4.4s, v28.4s, v29.4s
-; CHECK-GI-BASE-NEXT: add v2.4s, v24.4s, v25.4s
-; CHECK-GI-BASE-NEXT: add v3.4s, v26.4s, v27.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v0.4s, v1.4s
-; CHECK-GI-BASE-NEXT: add v1.4s, v2.4s, v3.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v4.4s, v0.4s
-; CHECK-GI-BASE-NEXT: add v0.4s, v1.4s, v0.4s
-; CHECK-GI-BASE-NEXT: addv s0, v0.4s
-; CHECK-GI-BASE-NEXT: fmov w0, s0
+; CHECK-GI-BASE-NEXT: ldp q0, q1, [x0]
+; CHECK-GI-BASE-NEXT: ldr q3, [x0, #32]
+; CHECK-GI-BASE-NEXT: ldp q2, q4, [x1]
+; CHECK-GI-BASE-NEXT: ldr q5, [x1, #32]
+; CHECK-GI-BASE-NEXT: smull v7.8h, v5.8b, v3.8b
+; CHECK-GI-BASE-NEXT: smull2 v3.8h, v5.16b, v3.16b
+; CHECK-GI-BASE-NEXT: smull v6.8h, v2.8b, v0.8b
+; CHECK-GI-BASE-NEXT: smull2 v0.8h, v2.16b, v0.16b
+; CHECK-GI-BASE-NEXT: smull2 v2.8h, v4.16b, v1.16b
+; CHECK-GI-BASE-NEXT: smull v1.8h, v4.8b, v1.8b
+; CHECK-GI-BASE-NEXT: saddlv s5, v7.8h
+; CHECK-GI-BASE-NEXT: saddlv s3, v3.8h
+; CHECK-GI-BASE-NEXT: saddlv s4, v6.8h
+; CHECK-GI-BASE-NEXT: saddlv s0, v0.8h
+; CHECK-GI-BASE-NEXT: saddlv s2, v2.8h
+; CHECK-GI-BASE-NEXT: saddlv s1, v1.8h
+; CHECK-GI-BASE-NEXT: fmov w11, s5
+; CHECK-GI-BASE-NEXT: fmov w8, s4
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w10, s2
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: fmov w9, s1
+; CHECK-GI-BASE-NEXT: add w10, w10, w11
+; CHECK-GI-BASE-NEXT: fmov w11, s3
+; CHECK-GI-BASE-NEXT: add w8, w8, w9
+; CHECK-GI-BASE-NEXT: add w9, w10, w11
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
; CHECK-GI-BASE-NEXT: ret
;
; CHECK-GI-DOT-LABEL: test_sdot_v48i8:
@@ -2549,18 +2449,27 @@ define i32 @test_udot_v8i8_multi_use(<8 x i8> %a, <8 x i8> %b) {
; CHECK-SD-DOT-NEXT: add w0, w8, w9
; CHECK-SD-DOT-NEXT: ret
;
-; CHECK-GI-LABEL: test_udot_v8i8_multi_use:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: ushll v0.8h, v0.8b, #0
-; CHECK-GI-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-GI-NEXT: umull v2.4s, v1.4h, v0.4h
-; CHECK-GI-NEXT: mov v3.16b, v2.16b
-; CHECK-GI-NEXT: fmov w8, s2
-; CHECK-GI-NEXT: umlal2 v3.4s, v1.8h, v0.8h
-; CHECK-GI-NEXT: addv s0, v3.4s
-; CHECK-GI-NEXT: fmov w9, s0
-; CHECK-GI-NEXT: add w0, w9, w8
-; CHECK-GI-NEXT: ret
+; CHECK-GI-BASE-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-BASE: // %bb.0: // %entry
+; CHECK-GI-BASE-NEXT: umull v0.8h, v1.8b, v0.8b
+; CHECK-GI-BASE-NEXT: uaddlv s1, v0.8h
+; CHECK-GI-BASE-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-GI-BASE-NEXT: fmov w9, s0
+; CHECK-GI-BASE-NEXT: fmov w8, s1
+; CHECK-GI-BASE-NEXT: add w0, w8, w9
+; CHECK-GI-BASE-NEXT: ret
+;
+; CHECK-GI-DOT-LABEL: test_udot_v8i8_multi_use:
+; CHECK-GI-DOT: // %bb.0: // %entry
+; CHECK-GI-DOT-NEXT: movi v2.2d, #0000000000000000
+; CHECK-GI-DOT-NEXT: umull v3.8h, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: udot v2.2s, v1.8b, v0.8b
+; CHECK-GI-DOT-NEXT: ushll v0.4s, v3.4h, #0
+; CHECK-GI-DOT-NEXT: fmov w9, s0
+; CHECK-GI-DOT-NEXT: addp v1.2s, v2.2s, v2.2s
+; CHECK-GI-DOT-NEXT: fmov w8, s1
+; CHECK-GI-DOT-NEXT: add w0, w8, w9
+; CHECK-GI-DOT-NEXT: ret
entry:
%0 = zext <8 x i8> %a to <8 x i32>
%1 = zext <8 x i8> %b to <8 x i32>
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
index eafad58..2226fd2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/frem.ll
@@ -157,7 +157,7 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
%r0 = load half, ptr addrspace(1) %in1, align 4
%r1 = load half, ptr addrspace(1) %gep2, align 4
- %r2 = frem half %r0, %r1
+ %r2 = frem afn half %r0, %r1
store half %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -311,7 +311,7 @@ define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace(
%gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
%r0 = load float, ptr addrspace(1) %in1, align 4
%r1 = load float, ptr addrspace(1) %gep2, align 4
- %r2 = frem float %r0, %r1
+ %r2 = frem afn float %r0, %r1
store float %r2, ptr addrspace(1) %out, align 4
ret void
}
@@ -489,7 +489,7 @@ define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace(
ptr addrspace(1) %in2) #1 {
%r0 = load double, ptr addrspace(1) %in1, align 8
%r1 = load double, ptr addrspace(1) %in2, align 8
- %r2 = frem double %r0, %r1
+ %r2 = frem afn double %r0, %r1
store double %r2, ptr addrspace(1) %out, align 8
ret void
}
@@ -1140,5 +1140,5 @@ define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
index 1f9c059..3fa73c2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fdiv.mir
@@ -2,9 +2,8 @@
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX9 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer -enable-unsafe-fp-math -o - %s | FileCheck -check-prefix=GFX9-UNSAFE %s
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX11 %s
---
name: test_fdiv_s16
@@ -99,17 +98,56 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX10-LABEL: name: test_fdiv_s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC1]](s16), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -120,6 +158,90 @@ body: |
...
---
+name: test_fdiv_s16_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s16_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; SI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; SI-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; SI-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[FPEXT]], [[INT]]
+ ; SI-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
+ ; SI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
+ ; SI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s16_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; VI-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; VI-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s16_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s16_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC1]](s16)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s16) = afn G_FMUL [[TRUNC]], [[INT]]
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s16) = G_TRUNC %0
+ %3:_(s16) = G_TRUNC %1
+ %4:_(s16) = afn G_FDIV %2, %3
+ %5:_(s32) = G_ANYEXT %4
+ $vgpr0 = COPY %5
+...
+
+---
name: test_fdiv_s32_denorms_on
machineFunctionInfo:
mode:
@@ -192,15 +314,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_on
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_on
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -220,6 +333,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -227,6 +360,70 @@ body: |
...
---
+name: test_fdiv_s32_denorms_on_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_on_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+---
name: test_fdiv_s32_denorms_off
machineFunctionInfo:
mode:
@@ -305,15 +502,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -335,6 +523,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_FDIV %0, %1
@@ -342,6 +552,69 @@ body: |
...
---
+name: test_fdiv_s32_denorms_off_afn
+machineFunctionInfo:
+ mode:
+ fp32-input-denormals: false
+ fp32-output-denormals: false
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; SI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; SI: liveins: $vgpr0, $vgpr1
+ ; SI-NEXT: {{ $}}
+ ; SI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; SI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; SI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; SI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; SI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; VI-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; VI: liveins: $vgpr0, $vgpr1
+ ; VI-NEXT: {{ $}}
+ ; VI-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; VI-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; VI-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; VI-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; VI-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX9-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX9-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX10-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX10-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_afn
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = afn G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = afn G_FMUL [[COPY]], [[INT]]
+ ; GFX11-NEXT: $vgpr0 = COPY [[FMUL]](s32)
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = afn G_FDIV %0, %1
+ $vgpr0 = COPY %2
+...
+
+---
name: test_fdiv_s32_denorms_off_arcp
machineFunctionInfo:
mode:
@@ -420,15 +693,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_denorms_off_arcp
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[COPY]], [[INT]]
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMUL]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_denorms_off_arcp
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
@@ -450,6 +714,28 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_denorms_off_arcp
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s32), [[COPY1]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = arcp G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = arcp G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = arcp G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = arcp G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = arcp G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY1]](s32), [[COPY]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = arcp G_FDIV %0, %1
@@ -536,23 +822,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY1]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY1]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -572,6 +841,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[COPY]](s64), [[COPY1]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY1]](s64), [[COPY]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_FDIV %0, %1
@@ -708,20 +997,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -760,6 +1035,45 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: S_DENORM_MODE 15, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: S_DENORM_MODE 12, implicit-def $mode, implicit $mode
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = G_FDIV %0, %1
@@ -877,20 +1191,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s32_flags
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s32_flags
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
@@ -925,6 +1225,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s32_flags
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV2]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV2]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = nnan G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = nnan G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = nnan G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = nnan G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = nnan G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV3]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
%2:_(<2 x s32>) = nnan G_FDIV %0, %1
@@ -1078,22 +1413,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s32
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV4]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[UV1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV5]](s32)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[UV2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32), [[FMUL2]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
- ;
; GFX10-LABEL: name: test_fdiv_v3s32
; GFX10: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
; GFX10-NEXT: {{ $}}
@@ -1140,6 +1459,53 @@ body: |
; GFX10-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s32
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>)
+ ; GFX11-NEXT: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s32), [[UV3]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[UV3]](s32), [[UV]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s32), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 0
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s32), [[INT10:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s32), [[UV4]](s32), 1
+ ; GFX11-NEXT: [[INT11:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s32)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[INT11]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FMA5]], [[INT11]], [[INT11]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[INT9]], [[FMA6]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL1]], [[INT9]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FMA7]], [[FMA6]], [[FMUL1]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA8]], [[INT9]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s32), [[FMA6]](s32), [[FMA8]](s32), [[INT10]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s32), [[UV4]](s32), [[UV1]](s32)
+ ; GFX11-NEXT: [[INT14:%[0-9]+]]:_(s32), [[INT15:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 0
+ ; GFX11-NEXT: [[INT16:%[0-9]+]]:_(s32), [[INT17:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV2]](s32), [[UV5]](s32), 1
+ ; GFX11-NEXT: [[INT18:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT14]](s32)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[INT14]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[INT18]], [[C]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FMA10]], [[INT18]], [[INT18]]
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[INT16]], [[FMA11]]
+ ; GFX11-NEXT: [[FMA12:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL2]], [[INT16]]
+ ; GFX11-NEXT: [[FMA13:%[0-9]+]]:_(s32) = G_FMA [[FMA12]], [[FMA11]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA14:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA13]], [[INT16]]
+ ; GFX11-NEXT: [[INT19:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA14]](s32), [[FMA11]](s32), [[FMA13]](s32), [[INT17]](s1)
+ ; GFX11-NEXT: [[INT20:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT19]](s32), [[UV5]](s32), [[UV2]](s32)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[INT6]](s32), [[INT13]](s32), [[INT20]](s32)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5
%2:_(<3 x s32>) = G_FDIV %0, %1
@@ -1271,35 +1637,6 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s64
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[UV2]]
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV2]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[UV]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[UV3]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[UV3]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[INT1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA7]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FMA8]], [[FMA7]], [[FMA7]]
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[UV1]], [[FMA9]]
- ; GFX9-UNSAFE-NEXT: [[FMA10:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[UV1]]
- ; GFX9-UNSAFE-NEXT: [[FMA11:%[0-9]+]]:_(s64) = G_FMA [[FMA10]], [[FMA9]], [[FMUL1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FMA5]](s64), [[FMA11]](s64)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
- ;
; GFX10-LABEL: name: test_fdiv_v2s64
; GFX10: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
; GFX10-NEXT: {{ $}}
@@ -1334,6 +1671,41 @@ body: |
; GFX10-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
; GFX10-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s64
+ ; GFX11: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV]](s64), [[UV2]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[UV2]](s64), [[UV]](s64)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s64), [[INT8:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 0
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[INT7]]
+ ; GFX11-NEXT: [[INT9:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT7]](s64)
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[INT9]], [[C]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s64) = G_FMA [[INT9]], [[FMA5]], [[INT9]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMA6]], [[C]]
+ ; GFX11-NEXT: [[INT10:%[0-9]+]]:_(s64), [[INT11:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[UV1]](s64), [[UV3]](s64), 1
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s64) = G_FMA [[FMA6]], [[FMA7]], [[FMA6]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s64) = G_FMUL [[INT10]], [[FMA8]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s64) = G_FMA [[FNEG1]], [[FMUL1]], [[INT10]]
+ ; GFX11-NEXT: [[INT12:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA9]](s64), [[FMA8]](s64), [[FMUL1]](s64), [[INT11]](s1)
+ ; GFX11-NEXT: [[INT13:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT12]](s64), [[UV3]](s64), [[UV1]](s64)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[INT6]](s64), [[INT13]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
%2:_(<2 x s64>) = G_FDIV %0, %1
@@ -1502,26 +1874,92 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v2s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC2]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v2s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC2]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC3]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v2s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
+ ; GFX11-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV2]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV3]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_FDIV %0, %1
@@ -1756,37 +2194,133 @@ body: |
; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
; GFX9-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v3s16
- ; GFX9-UNSAFE: liveins: $vgpr0, $vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC3]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL2]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
- ; GFX9-UNSAFE-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ; GFX10-LABEL: name: test_fdiv_v3s16
+ ; GFX10: liveins: $vgpr0, $vgpr1
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC3]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC4]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC5]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX10-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX10-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX10-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v3s16
+ ; GFX11: liveins: $vgpr0, $vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
+ ; GFX11-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[DEF1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT1]](s16)
+ ; GFX11-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[INT3]](s16)
+ ; GFX11-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[INT5]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32), [[ANYEXT2]](s32)
+ ; GFX11-NEXT: S_NOP 0, implicit [[BUILD_VECTOR]](<3 x s32>)
%0:_(<3 x s16>) = G_IMPLICIT_DEF
%1:_(<3 x s16>) = G_IMPLICIT_DEF
%2:_(<3 x s16>) = G_FDIV %0, %1
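
The GFX10 and GFX11 blocks above check the same f16 division lowering elementwise: each operand pair is extended to f32, a reciprocal seed from @llvm.amdgcn.rcp is refined by two Newton-Raphson steps (separate G_FMUL/G_FADD on GFX10, fused G_FMA on GFX11), the last correction term is masked with -8388608 (0xFF800000, keeping only the f32 sign and exponent bits) before the final G_FADD, and @llvm.amdgcn.div.fixup handles the special cases after truncating back to f16. As a sketch, with r = rcp(b) the refinement is

  q_0 = a \cdot r, \quad e_k = a - b \cdot q_k, \quad q_{k+1} = q_k + e_k \cdot r

which matches the FMUL/FMA/FADD chain repeated once per vector element.
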
@@ -2094,42 +2628,164 @@ body: |
; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_v4s16
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; GFX9-UNSAFE-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; GFX9-UNSAFE-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9-UNSAFE-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
- ; GFX9-UNSAFE-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
- ; GFX9-UNSAFE-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
- ; GFX9-UNSAFE-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC4]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC5]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL1:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC1]], [[INT1]]
- ; GFX9-UNSAFE-NEXT: [[INT2:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC6]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL2:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC2]], [[INT2]]
- ; GFX9-UNSAFE-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC7]](s16)
- ; GFX9-UNSAFE-NEXT: [[FMUL3:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC3]], [[INT3]]
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL]](s16), [[FMUL1]](s16)
- ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FMUL2]](s16), [[FMUL3]](s16)
- ; GFX9-UNSAFE-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ; GFX10-LABEL: name: test_fdiv_v4s16
+ ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX10-NEXT: {{ $}}
+ ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX10-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX10-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX10-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX10-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX10-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX10-NEXT: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX10-NEXT: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX10-NEXT: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX10-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC4]](s16)
+ ; GFX10-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX10-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FMUL]]
+ ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL1]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FADD]], [[INT]]
+ ; GFX10-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FMUL2]], [[FMUL]]
+ ; GFX10-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FNEG]], [[FADD1]]
+ ; GFX10-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FMUL3]], [[FPEXT]]
+ ; GFX10-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FADD2]], [[INT]]
+ ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL4]], [[C1]]
+ ; GFX10-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FADD1]]
+ ; GFX10-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[TRUNC4]](s16), [[TRUNC]](s16)
+ ; GFX10-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC5]](s16)
+ ; GFX10-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX10-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX10-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FMUL5]]
+ ; GFX10-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FMUL6]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FADD4]], [[INT2]]
+ ; GFX10-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FMUL7]], [[FMUL5]]
+ ; GFX10-NEXT: [[FMUL8:%[0-9]+]]:_(s32) = G_FMUL [[FNEG1]], [[FADD5]]
+ ; GFX10-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FMUL8]], [[FPEXT2]]
+ ; GFX10-NEXT: [[FMUL9:%[0-9]+]]:_(s32) = G_FMUL [[FADD6]], [[INT2]]
+ ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL9]], [[C1]]
+ ; GFX10-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FADD5]]
+ ; GFX10-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
+ ; GFX10-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[TRUNC5]](s16), [[TRUNC1]](s16)
+ ; GFX10-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC6]](s16)
+ ; GFX10-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX10-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX10-NEXT: [[FMUL10:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX10-NEXT: [[FMUL11:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FMUL10]]
+ ; GFX10-NEXT: [[FADD8:%[0-9]+]]:_(s32) = G_FADD [[FMUL11]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL12:%[0-9]+]]:_(s32) = G_FMUL [[FADD8]], [[INT4]]
+ ; GFX10-NEXT: [[FADD9:%[0-9]+]]:_(s32) = G_FADD [[FMUL12]], [[FMUL10]]
+ ; GFX10-NEXT: [[FMUL13:%[0-9]+]]:_(s32) = G_FMUL [[FNEG2]], [[FADD9]]
+ ; GFX10-NEXT: [[FADD10:%[0-9]+]]:_(s32) = G_FADD [[FMUL13]], [[FPEXT4]]
+ ; GFX10-NEXT: [[FMUL14:%[0-9]+]]:_(s32) = G_FMUL [[FADD10]], [[INT4]]
+ ; GFX10-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL14]], [[C1]]
+ ; GFX10-NEXT: [[FADD11:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FADD9]]
+ ; GFX10-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD11]](s32)
+ ; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[TRUNC6]](s16), [[TRUNC2]](s16)
+ ; GFX10-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC7]](s16)
+ ; GFX10-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX10-NEXT: [[FMUL15:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX10-NEXT: [[FMUL16:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FMUL15]]
+ ; GFX10-NEXT: [[FADD12:%[0-9]+]]:_(s32) = G_FADD [[FMUL16]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL17:%[0-9]+]]:_(s32) = G_FMUL [[FADD12]], [[INT6]]
+ ; GFX10-NEXT: [[FADD13:%[0-9]+]]:_(s32) = G_FADD [[FMUL17]], [[FMUL15]]
+ ; GFX10-NEXT: [[FMUL18:%[0-9]+]]:_(s32) = G_FMUL [[FNEG3]], [[FADD13]]
+ ; GFX10-NEXT: [[FADD14:%[0-9]+]]:_(s32) = G_FADD [[FMUL18]], [[FPEXT6]]
+ ; GFX10-NEXT: [[FMUL19:%[0-9]+]]:_(s32) = G_FMUL [[FADD14]], [[INT6]]
+ ; GFX10-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL19]], [[C1]]
+ ; GFX10-NEXT: [[FADD15:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FADD13]]
+ ; GFX10-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD15]](s32)
+ ; GFX10-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[TRUNC7]](s16), [[TRUNC3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX10-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX10-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_v4s16
+ ; GFX11: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
+ ; GFX11-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX11-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX11-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT1]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT1]](s32)
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA1]], [[FPEXT]]
+ ; GFX11-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL [[FMA2]], [[INT]]
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8388608
+ ; GFX11-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FMUL1]], [[C]]
+ ; GFX11-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[AND]], [[FMA1]]
+ ; GFX11-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
+ ; GFX11-NEXT: [[INT1:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC]](s16), [[UV4]](s16), [[UV]](s16)
+ ; GFX11-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+ ; GFX11-NEXT: [[FNEG1:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT3]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT3]](s32)
+ ; GFX11-NEXT: [[FMUL2:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT2]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMUL2]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FMA3]], [[INT2]], [[FMUL2]]
+ ; GFX11-NEXT: [[FMA5:%[0-9]+]]:_(s32) = G_FMA [[FNEG1]], [[FMA4]], [[FPEXT2]]
+ ; GFX11-NEXT: [[FMUL3:%[0-9]+]]:_(s32) = G_FMUL [[FMA5]], [[INT2]]
+ ; GFX11-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FMUL3]], [[C]]
+ ; GFX11-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[AND1]], [[FMA4]]
+ ; GFX11-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC1]](s16), [[UV5]](s16), [[UV1]](s16)
+ ; GFX11-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+ ; GFX11-NEXT: [[FNEG2:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT5]]
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT5]](s32)
+ ; GFX11-NEXT: [[FMUL4:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMA6:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMUL4]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMA7:%[0-9]+]]:_(s32) = G_FMA [[FMA6]], [[INT4]], [[FMUL4]]
+ ; GFX11-NEXT: [[FMA8:%[0-9]+]]:_(s32) = G_FMA [[FNEG2]], [[FMA7]], [[FPEXT4]]
+ ; GFX11-NEXT: [[FMUL5:%[0-9]+]]:_(s32) = G_FMUL [[FMA8]], [[INT4]]
+ ; GFX11-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[FMUL5]], [[C]]
+ ; GFX11-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[AND2]], [[FMA7]]
+ ; GFX11-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC2]](s16), [[UV6]](s16), [[UV2]](s16)
+ ; GFX11-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+ ; GFX11-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+ ; GFX11-NEXT: [[FNEG3:%[0-9]+]]:_(s32) = G_FNEG [[FPEXT7]]
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FPEXT7]](s32)
+ ; GFX11-NEXT: [[FMUL6:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT6]], [[INT6]]
+ ; GFX11-NEXT: [[FMA9:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMUL6]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMA10:%[0-9]+]]:_(s32) = G_FMA [[FMA9]], [[INT6]], [[FMUL6]]
+ ; GFX11-NEXT: [[FMA11:%[0-9]+]]:_(s32) = G_FMA [[FNEG3]], [[FMA10]], [[FPEXT6]]
+ ; GFX11-NEXT: [[FMUL7:%[0-9]+]]:_(s32) = G_FMUL [[FMA11]], [[INT6]]
+ ; GFX11-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[FMUL7]], [[C]]
+ ; GFX11-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[AND3]], [[FMA10]]
+ ; GFX11-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
+ ; GFX11-NEXT: [[INT7:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[FPTRUNC3]](s16), [[UV7]](s16), [[UV3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT1]](s16), [[INT3]](s16)
+ ; GFX11-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[INT5]](s16), [[INT7]](s16)
+ ; GFX11-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_FDIV %0, %1
@@ -2185,15 +2841,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2202,6 +2849,15 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[TRUNC]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2261,16 +2917,6 @@ body: |
; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
- ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2280,6 +2926,16 @@ body: |
; GFX10-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s16_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s16) = G_FNEG [[TRUNC]]
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s16)
+ ; GFX11-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[INT]](s16)
+ ; GFX11-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s16) = G_FCONSTANT half -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s16) = G_TRUNC %1
@@ -2351,13 +3007,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2376,6 +3025,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float 1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2448,14 +3116,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX9-NEXT: $vgpr0 = COPY [[INT6]](s32)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FNEG]](s32)
- ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[INT]](s32)
- ;
; GFX10-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
; GFX10: liveins: $vgpr0
; GFX10-NEXT: {{ $}}
@@ -2475,6 +3135,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
; GFX10-NEXT: $vgpr0 = COPY [[INT6]](s32)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s32_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s32), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 0
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s32), [[INT3:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s32), [[COPY]](s32), 1
+ ; GFX11-NEXT: [[INT4:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s32)
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s32) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[INT4]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s32) = G_FMA [[FMA]], [[INT4]], [[INT4]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMA3]], [[INT2]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s32), [[FMA1]](s32), [[FMA3]](s32), [[INT3]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s32), [[COPY]](s32), [[C]](s32)
+ ; GFX11-NEXT: $vgpr0 = COPY [[INT6]](s32)
%0:_(s32) = G_FCONSTANT float -1.0
%1:_(s32) = COPY $vgpr0
%2:_(s32) = G_FDIV %0, %1
@@ -2558,22 +3238,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2592,6 +3256,25 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double 1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
@@ -2678,23 +3361,6 @@ body: |
; GFX9-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
;
- ; GFX9-UNSAFE-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
- ; GFX9-UNSAFE: liveins: $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: {{ $}}
- ; GFX9-UNSAFE-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
- ; GFX9-UNSAFE-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY]]
- ; GFX9-UNSAFE-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
- ; GFX9-UNSAFE-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[COPY]](s64)
- ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[FMA]], [[INT]], [[INT]]
- ; GFX9-UNSAFE-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
- ; GFX9-UNSAFE-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA2]], [[FMA1]], [[FMA1]]
- ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[C]], [[FMA3]]
- ; GFX9-UNSAFE-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[C]]
- ; GFX9-UNSAFE-NEXT: [[FMA5:%[0-9]+]]:_(s64) = G_FMA [[FMA4]], [[FMA3]], [[FMUL]]
- ; GFX9-UNSAFE-NEXT: $vgpr0_vgpr1 = COPY [[FMA5]](s64)
- ;
; GFX10-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
@@ -2714,6 +3380,26 @@ body: |
; GFX10-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
; GFX10-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
+ ;
+ ; GFX11-LABEL: name: test_fdiv_s64_constant_negative_one_rcp
+ ; GFX11: liveins: $vgpr0_vgpr1
+ ; GFX11-NEXT: {{ $}}
+ ; GFX11-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -1.000000e+00
+ ; GFX11-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX11-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+ ; GFX11-NEXT: [[INT:%[0-9]+]]:_(s64), [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 0
+ ; GFX11-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INT]]
+ ; GFX11-NEXT: [[INT2:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[INT]](s64)
+ ; GFX11-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[INT2]], [[C1]]
+ ; GFX11-NEXT: [[FMA1:%[0-9]+]]:_(s64) = G_FMA [[INT2]], [[FMA]], [[INT2]]
+ ; GFX11-NEXT: [[FMA2:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMA1]], [[C1]]
+ ; GFX11-NEXT: [[INT3:%[0-9]+]]:_(s64), [[INT4:%[0-9]+]]:_(s1) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.scale), [[C]](s64), [[COPY]](s64), 1
+ ; GFX11-NEXT: [[FMA3:%[0-9]+]]:_(s64) = G_FMA [[FMA1]], [[FMA2]], [[FMA1]]
+ ; GFX11-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[INT3]], [[FMA3]]
+ ; GFX11-NEXT: [[FMA4:%[0-9]+]]:_(s64) = G_FMA [[FNEG]], [[FMUL]], [[INT3]]
+ ; GFX11-NEXT: [[INT5:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fmas), [[FMA4]](s64), [[FMA3]](s64), [[FMUL]](s64), [[INT4]](s1)
+ ; GFX11-NEXT: [[INT6:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.div.fixup), [[INT5]](s64), [[COPY]](s64), [[C]](s64)
+ ; GFX11-NEXT: $vgpr0_vgpr1 = COPY [[INT6]](s64)
%0:_(s64) = G_FCONSTANT double -1.0
%1:_(s64) = COPY $vgpr0_vgpr1
%2:_(s64) = G_FDIV %0, %1
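
The constant-one tests above show which reciprocal shortcuts remain now that the GFX9-UNSAFE (unsafe-fp-math) checks are gone: 1.0 / x still folds to a bare @llvm.amdgcn.rcp for s16 on GFX9, GFX10, and GFX11 alike, while the s32 and s64 variants keep the full div.scale / div.fmas / div.fixup sequence. A minimal sketch of the folding case (register names hypothetical; the trailing G_FDIV sits outside the visible hunk context):

  %one:_(s16) = G_FCONSTANT half 1.0
  %in:_(s32) = COPY $vgpr0
  %x:_(s16) = G_TRUNC %in
  %q:_(s16) = G_FDIV %one, %x  ; legalizes to @llvm.amdgcn.rcp per the checks above
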
diff --git a/llvm/test/CodeGen/AMDGPU/add-max.ll b/llvm/test/CodeGen/AMDGPU/add-max.ll
index b992506..00c6656 100644
--- a/llvm/test/CodeGen/AMDGPU/add-max.ll
+++ b/llvm/test/CodeGen/AMDGPU/add-max.ll
@@ -5,9 +5,7 @@
define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -18,9 +16,7 @@ define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_svv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, v0, v1
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, v1
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -29,12 +25,17 @@ define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
}
define amdgpu_ps float @add_max_u32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
-; GCN-LABEL: add_max_u32_ssv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_add_co_i32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_ssv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, s0, s1, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_ssv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_add_co_i32 s0, s0, s1
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
%ret = bitcast i32 %max to float
@@ -58,9 +59,7 @@ define amdgpu_ps float @add_max_u32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c
define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
; GCN-LABEL: add_max_u32_vsi:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 4, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, v0, s0, 4
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 4)
@@ -71,9 +70,7 @@ define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
; GCN-LABEL: add_max_u32_svl:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, s0, v0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_u32_e32 v0, 0x64, v0
+; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, 0x64
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 100)
@@ -82,12 +79,17 @@ define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
}
define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
-; GCN-LABEL: add_max_u32_slv:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_addk_co_i32 s0, 0x64
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: v_max_u32_e32 v0, s0, v0
-; GCN-NEXT: ; return to shader part epilog
+; SDAG-LABEL: add_max_u32_slv:
+; SDAG: ; %bb.0:
+; SDAG-NEXT: v_add_max_u32_e64 v0, 0x64, s0, v0
+; SDAG-NEXT: ; return to shader part epilog
+;
+; GISEL-LABEL: add_max_u32_slv:
+; GISEL: ; %bb.0:
+; GISEL-NEXT: s_addk_co_i32 s0, 0x64
+; GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-NEXT: v_max_u32_e32 v0, s0, v0
+; GISEL-NEXT: ; return to shader part epilog
%add = add i32 %a, 100
%max = call i32 @llvm.umax.i32(i32 %add, i32 %b)
%ret = bitcast i32 %max to float
@@ -97,9 +99,7 @@ define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_max_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_max_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smax.i32(i32 %add, i32 %c)
@@ -110,9 +110,7 @@ define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_u32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_u32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umin.i32(i32 %add, i32 %c)
@@ -123,9 +121,7 @@ define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_nc_u32_e32 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_min_i32_e32 v0, v0, v2
+; GCN-NEXT: v_add_min_i32_e64 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smin.i32(i32 %add, i32 %c)
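
The add-max.ll updates all follow one pattern: an integer add feeding umax/smax/umin/smin now selects to a single fused three-source VALU op (v_add_max_u32_e64, v_add_max_i32_e64, v_add_min_u32_e64, v_add_min_i32_e64) in place of v_add_nc_u32 plus a separate max/min with an s_delay_alu between them. The ssv and slv cases split into SDAG and GISEL check prefixes because only the SelectionDAG output shows the fused form for those scalar-operand mixes; the GlobalISel output still uses a scalar add followed by v_max. The IR shape being matched, as in the tests:

  %add = add i32 %a, %b
  %max = call i32 @llvm.umax.i32(i32 %add, i32 %c)  ; now one v_add_max_u32_e64
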
diff --git a/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
new file mode 100644
index 0000000..fcbf7ef
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/code-size-estimate-gfx1250.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -show-mc-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+define i16 @cvt_pk_bf8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_bf8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_bf8_f16 v0, 0x38003800 ; encoding: [0x00,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x38]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0xH3800, half 0xH3800>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
+
+define i16 @cvt_pk_fp8_f16_v(ptr addrspace(1) %out) {
+; GFX1250-LABEL: cvt_pk_fp8_f16_v:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0 ; encoding: [0x00,0x00,0xc8,0xbf]
+; GFX1250-NEXT: s_wait_kmcnt 0x0 ; encoding: [0x00,0x00,0xc7,0xbf]
+; GFX1250-NEXT: v_cvt_pk_fp8_f16 v0, 0x3800 ; encoding: [0x00,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31] ; encoding: [0x1e,0x48,0x80,0xbe]
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0xH3800, half 0xH0>)
+ ret i16 %cvt
+}
+
+; GFX1250: codeLenInByte = 24
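
The two codeLenInByte = 24 assertions follow directly from the -show-mc-encoding bytes in the checks: the two waits and s_set_pc_i64 encode in 4 bytes each, and the v_cvt_pk_*_f16 with its 32-bit literal takes 12, so

  s_wait_loadcnt_dscnt        4 bytes
  s_wait_kmcnt                4 bytes
  v_cvt_pk_*_f16 + literal   12 bytes
  s_set_pc_i64                4 bytes
  total                      24 bytes
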
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
index 9ae9d19..210e09f 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -1702,7 +1702,7 @@ entry:
%gep.r = getelementptr inbounds half, ptr addrspace(1) %r, i64 %tid.ext
%a.val = load volatile half, ptr addrspace(1) %gep.a
%b.val = load volatile half, ptr addrspace(1) %gep.b
- %r.val = fdiv half %a.val, %b.val
+ %r.val = fdiv afn half %a.val, %b.val
store half %r.val, ptr addrspace(1) %gep.r
ret void
}
@@ -2475,4 +2475,4 @@ declare <2 x half> @llvm.sqrt.v2f16(<2 x half>) #2
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
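
The fdiv.f16.ll hunks make the fast-math opt-in explicit: rather than a function-wide "unsafe-fp-math"="true" attribute, the test now requests the approximate lowering per instruction with the afn fast-math flag, consistent with the GFX9-UNSAFE checks dropped from the MIR test above:

  %r.val = fdiv afn half %a.val, %b.val  ; 'afn' permits the rcp-based approximation
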
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
index 57b4857..c52fb61 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -11,6 +11,10 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SDAG-FAKE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-TRUE16 %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-GISEL-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=0 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-SDAG-FAKE16 %s
+; TODO: FIXME-TRUE16 llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,+real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1250 -global-isel=1 -mattr=-flat-for-global,-real-true16 -denormal-fp-math=preserve-sign < %s | FileCheck -enable-var-scope -check-prefixes=GFX1250-GISEL-FAKE16 %s
define amdgpu_kernel void @fptrunc_f32_to_f16(
; SI-SDAG-LABEL: fptrunc_f32_to_f16:
@@ -192,6 +196,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -381,6 +418,39 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_afn(ptr addrspace(1) %r,
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %a) {
entry:
%a.val = load float, ptr addrspace(1) %a
@@ -1089,6 +1159,130 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s3, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s2, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s7, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s9, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s2, s7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s2, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s2, s2, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s7, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s7, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s2, s2, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 0x7c00, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s5, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1101,62 +1295,21 @@ entry:
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; SI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v1
-; SI-SDAG-NEXT: s_and_b32 s6, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s7, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s8, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; SI-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s6, s6, s7
-; SI-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; SI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; SI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s6, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; SI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s7
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s6, s1, s0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1174,62 +1327,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v1
-; VI-SDAG-NEXT: s_and_b32 s5, s4, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s5, v0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s8, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_and_b32 s5, s7, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s5, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; VI-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; VI-SDAG-NEXT: s_or_b32 s7, s10, s7
-; VI-SDAG-NEXT: s_or_b32 s9, s5, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s7, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s10
-; VI-SDAG-NEXT: s_add_i32 s7, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s5, 0
-; VI-SDAG-NEXT: s_cselect_b32 s5, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s7
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s5
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1247,62 +1359,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX9-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX9-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX9-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX9-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1320,62 +1391,21 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; GFX950-SDAG-NEXT: s_and_b32 s6, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s6, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s8, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_and_b32 s6, s7, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s7, 0x3f1, s8
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s7, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s6, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s10, s7, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s7
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s8, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s8, 12
-; GFX950-SDAG-NEXT: s_or_b32 s7, s10, s7
-; GFX950-SDAG-NEXT: s_or_b32 s9, s6, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s7, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s7, s7, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s10
-; GFX950-SDAG-NEXT: s_add_i32 s7, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s8, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s6, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s8, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s7
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s5, s4
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX950-SDAG-NEXT: buffer_store_short v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_f64_to_f16_afn:
@@ -1401,60 +1431,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -1468,60 +1451,13 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
-; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v1
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s3, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s4, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s4, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX11-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], 0
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -1552,6 +1488,40 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f64_to_f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -1769,6 +1739,38 @@ define amdgpu_kernel void @fptrunc_v2f32_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b64 v[0:1], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f32_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3014,6 +3016,225 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s4, 0x3f1, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s3, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s5, s5, s8
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s3, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
+; GFX1250-SDAG-FAKE16-NEXT: s_sub_co_i32 s9, 0x3f1, s5
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
+; GFX1250-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
+; GFX1250-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_addk_co_i32 s5, 0xfc10
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_add_co_i32 s3, s3, s10
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
+; GFX1250-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s5, 0x40f
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s2, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s8, 1, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s10, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s8, s8, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s9, s2, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s8, s8, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s4, s4, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s11, s10, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s4, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s8, s10
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s11, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s2, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s9, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s2, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s2, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
+; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s7, 8
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s9, s3, 0x1000
+; GFX1250-GISEL-FAKE16-NEXT: s_max_i32 s6, s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s8, s4, 12
+; GFX1250-GISEL-FAKE16-NEXT: s_min_i32 s6, s6, 13
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s5, s5, 9
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s10, s9, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s8
+; GFX1250-GISEL-FAKE16-NEXT: s_lshl_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s5, s5, 0x7c00
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, s9
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s10, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lt_i32 s4, 1
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s6, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 7
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s6, 3
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s6, 5
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s6, 1, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_add_co_i32 s3, s3, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_gt_i32 s4, 30
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 0x7c00, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s4, 0x40f
+; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, s5, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s4, s7, 16
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s4, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3026,106 +3247,25 @@ entry:
define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; SI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; SI-SDAG: ; %bb.0: ; %entry
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
-; SI-SDAG-NEXT: s_mov_b32 s10, s2
-; SI-SDAG-NEXT: s_mov_b32 s11, s3
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s6, -1
+; SI-SDAG-NEXT: s_mov_b32 s10, s6
+; SI-SDAG-NEXT: s_mov_b32 s11, s7
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s8, s6
-; SI-SDAG-NEXT: s_mov_b32 s9, s7
+; SI-SDAG-NEXT: s_mov_b32 s8, s2
+; SI-SDAG-NEXT: s_mov_b32 s9, s3
; SI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; SI-SDAG-NEXT: s_movk_i32 s0, 0x7e00
+; SI-SDAG-NEXT: s_mov_b32 s4, s0
+; SI-SDAG-NEXT: s_mov_b32 s5, s1
; SI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; SI-SDAG-NEXT: v_readfirstlane_b32 s1, v3
-; SI-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; SI-SDAG-NEXT: s_and_b32 s7, s1, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s8, s1, 8
-; SI-SDAG-NEXT: s_bfe_u32 s9, s1, 0xb0014
-; SI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; SI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; SI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; SI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; SI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; SI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; SI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; SI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; SI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; SI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; SI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s7, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; SI-SDAG-NEXT: s_lshr_b32 s1, s1, 16
-; SI-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; SI-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; SI-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; SI-SDAG-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; SI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; SI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; SI-SDAG-NEXT: s_or_b32 s1, s1, s7
-; SI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; SI-SDAG-NEXT: s_lshl_b32 s1, s1, 16
-; SI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; SI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; SI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; SI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; SI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; SI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; SI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; SI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; SI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; SI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; SI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; SI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; SI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; SI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; SI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; SI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; SI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; SI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; SI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; SI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; SI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; SI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; SI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, 0x7c00
-; SI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; SI-SDAG-NEXT: s_cselect_b32 s0, s0, s8
-; SI-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; SI-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; SI-SDAG-NEXT: s_or_b32 s0, s6, s0
-; SI-SDAG-NEXT: s_and_b32 s0, s0, 0xffff
-; SI-SDAG-NEXT: s_or_b32 s6, s0, s1
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; SI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; SI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; SI-SDAG-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3147,106 +3287,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; VI-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; VI-SDAG: ; %bb.0: ; %entry
-; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SDAG-NEXT: s_mov_b32 s10, s2
-; VI-SDAG-NEXT: s_mov_b32 s11, s3
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s6, -1
+; VI-SDAG-NEXT: s_mov_b32 s10, s6
+; VI-SDAG-NEXT: s_mov_b32 s11, s7
; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s8, s6
-; VI-SDAG-NEXT: s_mov_b32 s9, s7
+; VI-SDAG-NEXT: s_mov_b32 s8, s2
+; VI-SDAG-NEXT: s_mov_b32 s9, s3
; VI-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; VI-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; VI-SDAG-NEXT: s_mov_b32 s4, s0
+; VI-SDAG-NEXT: s_mov_b32 s5, s1
; VI-SDAG-NEXT: s_waitcnt vmcnt(0)
-; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v3
-; VI-SDAG-NEXT: s_and_b32 s7, s4, 0x1ff
-; VI-SDAG-NEXT: v_readfirstlane_b32 s5, v1
-; VI-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; VI-SDAG-NEXT: s_lshr_b32 s8, s4, 8
-; VI-SDAG-NEXT: s_bfe_u32 s9, s4, 0xb0014
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; VI-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; VI-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; VI-SDAG-NEXT: s_or_b32 s7, s7, s8
-; VI-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; VI-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s10, s7, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; VI-SDAG-NEXT: s_and_b32 s10, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; VI-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s10, s10, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s10
-; VI-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s7, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; VI-SDAG-NEXT: s_and_b32 s8, s5, 0x1ff
-; VI-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; VI-SDAG-NEXT: s_lshr_b32 s4, s4, 16
-; VI-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-SDAG-NEXT: s_lshr_b32 s9, s5, 8
-; VI-SDAG-NEXT: s_bfe_u32 s10, s5, 0xb0014
-; VI-SDAG-NEXT: s_and_b32 s4, s4, 0x8000
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; VI-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; VI-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; VI-SDAG-NEXT: s_or_b32 s4, s4, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; VI-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; VI-SDAG-NEXT: s_or_b32 s7, s8, s7
-; VI-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; VI-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; VI-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; VI-SDAG-NEXT: s_lshl_b32 s4, s4, 16
-; VI-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; VI-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; VI-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; VI-SDAG-NEXT: s_or_b32 s8, s11, s8
-; VI-SDAG-NEXT: s_or_b32 s9, s7, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; VI-SDAG-NEXT: s_and_b32 s9, s8, 7
-; VI-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; VI-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; VI-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; VI-SDAG-NEXT: s_or_b32 s9, s9, s11
-; VI-SDAG-NEXT: s_add_i32 s8, s8, s9
-; VI-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; VI-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; VI-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; VI-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; VI-SDAG-NEXT: s_cselect_b32 s6, s6, s8
-; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SDAG-NEXT: s_or_b32 s5, s5, s6
-; VI-SDAG-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; VI-SDAG-NEXT: v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3267,104 +3325,24 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX9-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX9-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX9-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX9-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX9-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX9-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX9-SDAG-NEXT: s_mov_b32 s11, s7
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX9-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX9-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX9-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX9-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX9-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX9-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX9-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX9-SDAG-NEXT: s_mov_b32 s5, s1
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX9-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX9-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX9-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX9-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX9-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX9-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX9-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX9-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX9-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX9-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX9-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX9-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX9-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX9-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX9-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX9-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX9-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX9-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX9-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX9-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX9-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX9-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX9-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX9-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX9-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX9-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX9-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX9-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX9-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX9-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX9-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX9-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX9-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX9-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX9-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX9-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX9-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX9-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX9-SDAG-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX9-SDAG-NEXT: s_endpgm
;
; GFX9-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3385,104 +3363,22 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
;
; GFX950-SDAG-LABEL: fptrunc_v2f64_to_v2f16_afn:
; GFX950-SDAG: ; %bb.0: ; %entry
-; GFX950-SDAG-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX950-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; GFX950-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX950-SDAG-NEXT: s_mov_b32 s6, s2
-; GFX950-SDAG-NEXT: s_mov_b32 s7, s3
+; GFX950-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX950-SDAG-NEXT: s_mov_b32 s7, 0xf000
+; GFX950-SDAG-NEXT: s_mov_b32 s6, -1
+; GFX950-SDAG-NEXT: s_mov_b32 s10, s6
+; GFX950-SDAG-NEXT: s_mov_b32 s11, s7
; GFX950-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX950-SDAG-NEXT: s_mov_b32 s4, s10
-; GFX950-SDAG-NEXT: s_mov_b32 s5, s11
-; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[4:7], 0
-; GFX950-SDAG-NEXT: s_mov_b32 s0, s8
-; GFX950-SDAG-NEXT: s_mov_b32 s1, s9
-; GFX950-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; GFX950-SDAG-NEXT: s_mov_b32 s8, s2
+; GFX950-SDAG-NEXT: s_mov_b32 s9, s3
+; GFX950-SDAG-NEXT: buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GFX950-SDAG-NEXT: s_mov_b32 s4, s0
+; GFX950-SDAG-NEXT: s_mov_b32 s5, s1
; GFX950-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s5, v3
-; GFX950-SDAG-NEXT: s_and_b32 s7, s5, 0x1ff
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX950-SDAG-NEXT: v_or_b32_e32 v1, s7, v2
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s5, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s9, s5, 0xb0014
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX950-SDAG-NEXT: s_and_b32 s7, s8, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s9
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
-; GFX950-SDAG-NEXT: v_med3_i32 v2, s8, 0, 13
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s8, v1
-; GFX950-SDAG-NEXT: s_or_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s10, v2
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s11, s10
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s10, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s9, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s10, s9, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s10, s7, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_and_b32 s10, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s10, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s10, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s10, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s10, s10, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s10
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s9, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s9, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s7, s7, s8
-; GFX950-SDAG-NEXT: s_and_b32 s8, s6, 0x1ff
-; GFX950-SDAG-NEXT: v_or_b32_e32 v0, s8, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s5, s5, 16
-; GFX950-SDAG-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX950-SDAG-NEXT: s_lshr_b32 s9, s6, 8
-; GFX950-SDAG-NEXT: s_bfe_u32 s10, s6, 0xb0014
-; GFX950-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; GFX950-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX950-SDAG-NEXT: s_and_b32 s8, s9, 0xffe
-; GFX950-SDAG-NEXT: s_sub_i32 s9, 0x3f1, s10
-; GFX950-SDAG-NEXT: s_or_b32 s5, s5, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s7, v0
-; GFX950-SDAG-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX950-SDAG-NEXT: s_or_b32 s7, s8, s7
-; GFX950-SDAG-NEXT: v_readfirstlane_b32 s9, v1
-; GFX950-SDAG-NEXT: s_or_b32 s8, s7, 0x1000
-; GFX950-SDAG-NEXT: s_lshr_b32 s11, s8, s9
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s11, s9
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s9, s8
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; GFX950-SDAG-NEXT: s_addk_i32 s10, 0xfc10
-; GFX950-SDAG-NEXT: s_lshl_b32 s9, s10, 12
-; GFX950-SDAG-NEXT: s_or_b32 s8, s11, s8
-; GFX950-SDAG-NEXT: s_or_b32 s9, s7, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 1
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_and_b32 s9, s8, 7
-; GFX950-SDAG-NEXT: s_cmp_gt_i32 s9, 5
-; GFX950-SDAG-NEXT: s_cselect_b32 s11, 1, 0
-; GFX950-SDAG-NEXT: s_cmp_eq_u32 s9, 3
-; GFX950-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; GFX950-SDAG-NEXT: s_lshr_b32 s8, s8, 2
-; GFX950-SDAG-NEXT: s_or_b32 s9, s9, s11
-; GFX950-SDAG-NEXT: s_add_i32 s8, s8, s9
-; GFX950-SDAG-NEXT: s_cmp_lt_i32 s10, 31
-; GFX950-SDAG-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX950-SDAG-NEXT: s_cmp_lg_u32 s7, 0
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; GFX950-SDAG-NEXT: s_cmpk_eq_i32 s10, 0x40f
-; GFX950-SDAG-NEXT: s_cselect_b32 s4, s4, s8
-; GFX950-SDAG-NEXT: s_lshr_b32 s6, s6, 16
-; GFX950-SDAG-NEXT: s_and_b32 s6, s6, 0x8000
-; GFX950-SDAG-NEXT: s_or_b32 s4, s6, s4
-; GFX950-SDAG-NEXT: s_pack_ll_b32_b16 s4, s4, s5
-; GFX950-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX950-SDAG-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX950-SDAG-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX950-SDAG-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX950-SDAG-NEXT: s_endpgm
;
; GFX950-GISEL-LABEL: fptrunc_v2f64_to_v2f16_afn:
@@ -3511,109 +3407,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-TRUE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-TRUE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-TRUE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-TRUE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-TRUE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-TRUE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-TRUE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-TRUE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-TRUE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v1, v[0:1]
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.h, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_pack_b32_f16 v0, v0.h, v0.l
; GFX11-SDAG-TRUE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
@@ -3627,109 +3431,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-SDAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
; GFX11-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], 0
-; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s2, v3
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s3, s2, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s2, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v2, s3, v2
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s3, s2, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s4, 0x3f1, s3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v3, s4, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s8, v3
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s4, s5, s4
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s9, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s9, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s8, s5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s3, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s5, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s8, s3, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s4, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s8, s5, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s8, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s8, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s5, s5, s8
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s3, 31
-; GFX11-SDAG-FAKE16-NEXT: s_movk_i32 s8, 0x7e00
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s9, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s9, s5
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s5, s4, 0x1ff
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s10, s4, 8
-; GFX11-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, s5, v0
-; GFX11-SDAG-FAKE16-NEXT: s_bfe_u32 s5, s4, 0xb0014
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s10, 0xffe
-; GFX11-SDAG-FAKE16-NEXT: s_sub_i32 s9, 0x3f1, s5
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-SDAG-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-SDAG-FAKE16-NEXT: v_med3_i32 v1, s9, 0, 13
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s2, s2, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s2, s2, s3
-; GFX11-SDAG-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s11, v1
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_readfirstlane_b32 s9, v0
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s9, s10, s9
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, 0x1000
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s12, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s11, s12, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s11, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_addk_i32 s5, 0xfc10
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s12, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshl_b32 s10, s5, 12
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s9, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s10, s3, 7
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_gt_i32 s10, 5
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s11, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_eq_u32 s10, 3
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s10, 1, 0
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s3, s3, 2
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_add_i32 s3, s3, s10
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lt_i32 s5, 31
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s3, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s8, s8, 0x7c00
-; GFX11-SDAG-FAKE16-NEXT: s_cmpk_eq_i32 s5, 0x40f
; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
-; GFX11-SDAG-FAKE16-NEXT: s_cselect_b32 s3, s8, s3
-; GFX11-SDAG-FAKE16-NEXT: s_lshr_b32 s4, s4, 16
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: s_and_b32 s4, s4, 0x8000
-; GFX11-SDAG-FAKE16-NEXT: s_or_b32 s3, s4, s3
-; GFX11-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
-; GFX11-SDAG-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s3, s2
-; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SDAG-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v2
+; GFX11-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX11-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SDAG-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], 0
; GFX11-SDAG-FAKE16-NEXT: s_endpgm
;
@@ -3768,6 +3480,46 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16_afn(
; GFX11-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b128 v[0:3], off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, v[0:1]
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_f16_f32 v0, v0, v2
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_v2f64_to_v2f16_afn:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f32_f64_e32 v1, s[6:7]
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_pack_b32_f16 v0, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -3957,6 +3709,42 @@ define amdgpu_kernel void @fneg_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_xor_b32 s2, s2, 0x80000000
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4147,6 +3935,42 @@ define amdgpu_kernel void @fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) {
entry:
@@ -4337,6 +4161,42 @@ define amdgpu_kernel void @fneg_fabs_fptrunc_f32_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_or_b32_e32 v0, 0x80000000, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fneg_fabs_fptrunc_f32_to_f16:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset1_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4536,6 +4396,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4735,6 +4631,45 @@ define amdgpu_kernel void @fptrunc_fabs_f32_to_f16_zext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_fabs_f32_to_f16_zext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_bitset0_b32 s2, 31
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s2, 0xffff, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
@@ -4943,6 +4878,42 @@ define amdgpu_kernel void @fptrunc_f32_to_f16_sext_i32(
; GFX11-GISEL-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
; GFX11-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-GISEL-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-SDAG-FAKE16: ; %bb.0: ; %entry
+; GFX1250-SDAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s6, -1
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s7, 0x31016000
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s10, s6
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s11, s7
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s8, s2
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s9, s3
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s4, s0
+; GFX1250-SDAG-FAKE16-NEXT: buffer_load_b32 v0, off, s[8:11], null
+; GFX1250-SDAG-FAKE16-NEXT: s_mov_b32 s5, s1
+; GFX1250-SDAG-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-FAKE16-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-SDAG-FAKE16-NEXT: buffer_store_b32 v0, off, s[4:7], null
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: fptrunc_f32_to_f16_sext_i32:
+; GFX1250-GISEL-FAKE16: ; %bb.0: ; %entry
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_load_b32 s2, s[2:3], 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-FAKE16-NEXT: s_cvt_f16_f32 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_sext_i32_i16 s2, s2
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_mov_b32 s2, -1
+; GFX1250-GISEL-FAKE16-NEXT: buffer_store_b32 v0, off, s[0:3], null
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
ptr addrspace(1) %r,
ptr addrspace(1) %a) #0 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
index 4f8eab1..5d31177 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
@@ -226,59 +226,59 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: s_mov_b32 s0, s4
+; VI-SDAG-NEXT: s_lshr_b32 s4, s7, 8
+; VI-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
+; VI-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
+; VI-SDAG-NEXT: s_or_b32 s4, s4, s6
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_mov_b32 s1, s5
+; VI-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; VI-SDAG-NEXT: v_readfirstlane_b32 s4, v0
+; VI-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
+; VI-SDAG-NEXT: s_or_b32 s4, s8, s4
+; VI-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
+; VI-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
+; VI-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; VI-SDAG-NEXT: v_readfirstlane_b32 s8, v0
+; VI-SDAG-NEXT: s_lshr_b32 s9, s5, s8
+; VI-SDAG-NEXT: s_lshl_b32 s8, s9, s8
+; VI-SDAG-NEXT: s_cmp_lg_u32 s8, s5
+; VI-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; VI-SDAG-NEXT: s_addk_i32 s6, 0xfc10
+; VI-SDAG-NEXT: s_lshl_b32 s8, s6, 12
+; VI-SDAG-NEXT: s_or_b32 s5, s9, s5
+; VI-SDAG-NEXT: s_or_b32 s8, s4, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 1
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, s8
+; VI-SDAG-NEXT: s_and_b32 s8, s5, 7
+; VI-SDAG-NEXT: s_cmp_gt_i32 s8, 5
+; VI-SDAG-NEXT: s_cselect_b32 s9, 1, 0
+; VI-SDAG-NEXT: s_cmp_eq_u32 s8, 3
+; VI-SDAG-NEXT: s_cselect_b32 s8, 1, 0
+; VI-SDAG-NEXT: s_or_b32 s8, s8, s9
+; VI-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; VI-SDAG-NEXT: s_add_i32 s5, s5, s8
+; VI-SDAG-NEXT: s_cmp_lt_i32 s6, 31
+; VI-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; VI-SDAG-NEXT: s_movk_i32 s4, 0x7e00
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
+; VI-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
+; VI-SDAG-NEXT: s_cselect_b32 s4, s4, s5
+; VI-SDAG-NEXT: s_lshr_b32 s5, s7, 16
+; VI-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
+; VI-SDAG-NEXT: s_or_b32 s4, s5, s4
+; VI-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16:
; VI-GISEL: ; %bb.0:
@@ -331,68 +331,57 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX10-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX10-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX10-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX10-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX10-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX10-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX10-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX10-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX10-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX10-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX10-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX10-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX10-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX10-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX10-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX10-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX10-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX10-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX10-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX10-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX10-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX10-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX10-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX10-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX10-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX10-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX10-GISEL: ; %bb.0:
@@ -445,76 +434,65 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16:
-; GFX11-SAFE-SDAG: ; %bb.0:
-; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-SAFE-SDAG-NEXT: s_endpgm
+; GFX11-SDAG-LABEL: fptrunc_f64_to_f16:
+; GFX11-SDAG: ; %bb.0:
+; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s3, 8
+; GFX11-SDAG-NEXT: s_or_b32 s2, s4, s2
+; GFX11-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, -1, 0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
+; GFX11-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s5, v0
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: v_readfirstlane_b32 s6, v1
+; GFX11-SDAG-NEXT: s_or_b32 s4, s4, s5
+; GFX11-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_lshr_b32 s7, s5, s6
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s7, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s6, s5
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, 1, 0
+; GFX11-SDAG-NEXT: s_addk_i32 s2, 0xfc10
+; GFX11-SDAG-NEXT: s_or_b32 s5, s7, s5
+; GFX11-SDAG-NEXT: s_lshl_b32 s6, s2, 12
+; GFX11-SDAG-NEXT: s_or_b32 s6, s4, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 1
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s6, s5, 7
+; GFX11-SDAG-NEXT: s_cmp_gt_i32 s6, 5
+; GFX11-SDAG-NEXT: s_cselect_b32 s7, 1, 0
+; GFX11-SDAG-NEXT: s_cmp_eq_u32 s6, 3
+; GFX11-SDAG-NEXT: s_cselect_b32 s6, 1, 0
+; GFX11-SDAG-NEXT: s_lshr_b32 s5, s5, 2
+; GFX11-SDAG-NEXT: s_or_b32 s6, s6, s7
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_add_i32 s5, s5, s6
+; GFX11-SDAG-NEXT: s_cmp_lt_i32 s2, 31
+; GFX11-SDAG-NEXT: s_movk_i32 s6, 0x7e00
+; GFX11-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
+; GFX11-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
+; GFX11-SDAG-NEXT: s_cselect_b32 s2, s4, s5
+; GFX11-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
+; GFX11-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
+; GFX11-SDAG-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: fptrunc_f64_to_f16:
; GFX11-GISEL: ; %bb.0:
@@ -570,30 +548,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX11-GISEL-NEXT: s_mov_b32 s2, -1
; GFX11-GISEL-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-TRUE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-TRUE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-TRUE16-NEXT: s_endpgm
-;
-; GFX11-UNSAFE-DAG-FAKE16-LABEL: fptrunc_f64_to_f16:
-; GFX11-UNSAFE-DAG-FAKE16: ; %bb.0:
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_mov_b32 s2, -1
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: buffer_store_b16 v0, off, s[0:3], 0
-; GFX11-UNSAFE-DAG-FAKE16-NEXT: s_endpgm
%result = fptrunc double %in to half
%result_i16 = bitcast half %result to i16
store i16 %result_i16, ptr addrspace(1) %out
@@ -603,111 +557,27 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double %in) {
; SI-LABEL: fptrunc_f64_to_f16_afn:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
-; SI-NEXT: s_movk_i32 s2, 0x7e00
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s0, s7, 8
-; SI-NEXT: s_and_b32 s1, s7, 0x1ff
-; SI-NEXT: s_and_b32 s8, s0, 0xffe
-; SI-NEXT: s_or_b32 s0, s1, s6
-; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; SI-NEXT: s_bfe_u32 s0, s7, 0xb0014
-; SI-NEXT: v_readfirstlane_b32 s1, v0
-; SI-NEXT: s_sub_i32 s6, 0x3f1, s0
-; SI-NEXT: s_or_b32 s1, s8, s1
-; SI-NEXT: v_med3_i32 v0, s6, 0, 13
-; SI-NEXT: s_or_b32 s6, s1, 0x1000
-; SI-NEXT: v_readfirstlane_b32 s8, v0
-; SI-NEXT: s_lshr_b32 s9, s6, s8
-; SI-NEXT: s_lshl_b32 s8, s9, s8
-; SI-NEXT: s_cmp_lg_u32 s8, s6
-; SI-NEXT: s_cselect_b32 s6, 1, 0
-; SI-NEXT: s_addk_i32 s0, 0xfc10
-; SI-NEXT: s_or_b32 s6, s9, s6
-; SI-NEXT: s_lshl_b32 s8, s0, 12
-; SI-NEXT: s_or_b32 s8, s1, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 1
-; SI-NEXT: s_cselect_b32 s6, s6, s8
-; SI-NEXT: s_and_b32 s8, s6, 7
-; SI-NEXT: s_cmp_gt_i32 s8, 5
-; SI-NEXT: s_cselect_b32 s9, 1, 0
-; SI-NEXT: s_cmp_eq_u32 s8, 3
-; SI-NEXT: s_cselect_b32 s8, 1, 0
-; SI-NEXT: s_lshr_b32 s6, s6, 2
-; SI-NEXT: s_or_b32 s8, s8, s9
-; SI-NEXT: s_add_i32 s6, s6, s8
-; SI-NEXT: s_cmp_lt_i32 s0, 31
-; SI-NEXT: s_cselect_b32 s6, s6, 0x7c00
-; SI-NEXT: s_cmp_lg_u32 s1, 0
-; SI-NEXT: s_cselect_b32 s1, s2, 0x7c00
-; SI-NEXT: s_cmpk_eq_i32 s0, 0x40f
-; SI-NEXT: s_cselect_b32 s0, s1, s6
-; SI-NEXT: s_lshr_b32 s1, s7, 16
-; SI-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-NEXT: s_or_b32 s6, s1, s0
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_mov_b32 s0, s4
-; SI-NEXT: s_mov_b32 s1, s5
-; SI-NEXT: v_mov_b32_e32 v0, s6
-; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
-; VI-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-SAFE-SDAG: ; %bb.0:
-; VI-SAFE-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x24
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s0, s4
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s4, s7, 8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
-; VI-SAFE-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_mov_b32 s1, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
-; VI-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s4, v0
-; VI-SAFE-SDAG-NEXT: s_bfe_u32 s6, s7, 0xb0014
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s8, s4
-; VI-SAFE-SDAG-NEXT: s_sub_i32 s8, 0x3f1, s6
-; VI-SAFE-SDAG-NEXT: v_med3_i32 v0, s8, 0, 13
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; VI-SAFE-SDAG-NEXT: v_readfirstlane_b32 s8, v0
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s9, s5, s8
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s9, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s8, s5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; VI-SAFE-SDAG-NEXT: s_addk_i32 s6, 0xfc10
-; VI-SAFE-SDAG-NEXT: s_lshl_b32 s8, s6, 12
-; VI-SAFE-SDAG-NEXT: s_or_b32 s5, s9, s5
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s4, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 1
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_and_b32 s8, s5, 7
-; VI-SAFE-SDAG-NEXT: s_cmp_gt_i32 s8, 5
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s9, 1, 0
-; VI-SAFE-SDAG-NEXT: s_cmp_eq_u32 s8, 3
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s8, 1, 0
-; VI-SAFE-SDAG-NEXT: s_or_b32 s8, s8, s9
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; VI-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s8
-; VI-SAFE-SDAG-NEXT: s_cmp_lt_i32 s6, 31
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; VI-SAFE-SDAG-NEXT: s_movk_i32 s4, 0x7e00
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, 0x7c00
-; VI-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s6, 0x40f
-; VI-SAFE-SDAG-NEXT: s_cselect_b32 s4, s4, s5
-; VI-SAFE-SDAG-NEXT: s_lshr_b32 s5, s7, 16
-; VI-SAFE-SDAG-NEXT: s_and_b32 s5, s5, 0x8000
-; VI-SAFE-SDAG-NEXT: s_or_b32 s4, s5, s4
-; VI-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s4
-; VI-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-SAFE-SDAG-NEXT: s_endpgm
+; VI-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; VI-SDAG: ; %bb.0:
+; VI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; VI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; VI-SDAG-NEXT: s_mov_b32 s2, -1
+; VI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; VI-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; VI-SDAG-NEXT: s_endpgm
;
; VI-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; VI-GISEL: ; %bb.0:
@@ -720,68 +590,16 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; VI-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-GISEL-NEXT: s_endpgm
;
-; VI-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; VI-UNSAFE-SDAG: ; %bb.0:
-; VI-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; VI-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; VI-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; VI-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; VI-UNSAFE-SDAG-NEXT: s_endpgm
-;
-; GFX10-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-SAFE-SDAG: ; %bb.0:
-; GFX10-SAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX10-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX10-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX10-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX10-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX10-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX10-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX10-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX10-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX10-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX10-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX10-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX10-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
-; GFX10-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-SAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-SAFE-SDAG-NEXT: s_endpgm
+; GFX10-SDAG-LABEL: fptrunc_f64_to_f16_afn:
+; GFX10-SDAG: ; %bb.0:
+; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
+; GFX10-SDAG-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX10-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX10-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
+; GFX10-SDAG-NEXT: s_endpgm
;
; GFX10-GISEL-LABEL: fptrunc_f64_to_f16_afn:
; GFX10-GISEL: ; %bb.0:
@@ -794,74 +612,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16_afn(ptr addrspace(1) %out, double
; GFX10-GISEL-NEXT: buffer_store_short v0, off, s[0:3], 0
; GFX10-GISEL-NEXT: s_endpgm
;
-; GFX10-UNSAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
-; GFX10-UNSAFE-SDAG: ; %bb.0:
-; GFX10-UNSAFE-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-UNSAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX10-UNSAFE-SDAG-NEXT: s_mov_b32 s2, -1
-; GFX10-UNSAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; GFX10-UNSAFE-SDAG-NEXT: buffer_store_short v0, off, s[0:3], 0
-; GFX10-UNSAFE-SDAG-NEXT: s_endpgm
-;
; GFX11-SAFE-SDAG-LABEL: fptrunc_f64_to_f16_afn:
; GFX11-SAFE-SDAG: ; %bb.0:
; GFX11-SAFE-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SAFE-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-SAFE-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; GFX11-SAFE-SDAG-NEXT: s_sub_i32 s5, 0x3f1, s2
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-SDAG-NEXT: v_med3_i32 v1, s5, 0, 13
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s5, v0
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: v_readfirstlane_b32 s6, v1
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s4, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s4, 0x1000
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s7, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s7, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s6, s5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_addk_i32 s2, 0xfc10
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s5, s7, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshl_b32 s6, s2, 12
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s4, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 1
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s6, s5, 7
-; GFX11-SAFE-SDAG-NEXT: s_cmp_gt_i32 s6, 5
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s7, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_cmp_eq_u32 s6, 3
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s6, 1, 0
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s5, s5, 2
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s6, s6, s7
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_add_i32 s5, s5, s6
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lt_i32 s2, 31
-; GFX11-SAFE-SDAG-NEXT: s_movk_i32 s6, 0x7e00
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s5, s5, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s4, s6, 0x7c00
-; GFX11-SAFE-SDAG-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; GFX11-SAFE-SDAG-NEXT: s_cselect_b32 s2, s4, s5
-; GFX11-SAFE-SDAG-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-SAFE-SDAG-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-SAFE-SDAG-NEXT: s_or_b32 s2, s3, s2
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f32_f64_e32 v0, s[2:3]
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s3, 0x31016000
-; GFX11-SAFE-SDAG-NEXT: v_mov_b32_e32 v0, s2
; GFX11-SAFE-SDAG-NEXT: s_mov_b32 s2, -1
+; GFX11-SAFE-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-SDAG-NEXT: v_cvt_f16_f32_e32 v0.l, v0
; GFX11-SAFE-SDAG-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-SAFE-SDAG-NEXT: s_endpgm
;
@@ -1833,4 +1592,8 @@ define amdgpu_kernel void @fptrunc_v8f64_to_v8f32_afn(ptr addrspace(1) %out, <8
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX10-SAFE-GISEL: {{.*}}
+; GFX10-SAFE-SDAG: {{.*}}
+; GFX10-UNSAFE-SDAG: {{.*}}
; VI-SAFE-GISEL: {{.*}}
+; VI-SAFE-SDAG: {{.*}}
+; VI-UNSAFE-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
index 87c7cce..f81950b 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f32.ll
@@ -1294,13 +1294,13 @@ define float @v_sqrt_f32__enough_unsafe_attrs(float %x) #3 {
ret float %result
}
-define float @v_sqrt_f32__unsafe_attr(float %x) #4 {
+define float @v_sqrt_f32__unsafe_attr(float %x) {
; GCN-LABEL: v_sqrt_f32__unsafe_attr:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_sqrt_f32_e32 v0, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
- %result = call nsz float @llvm.sqrt.f32(float %x)
+ %result = call afn nsz float @llvm.sqrt.f32(float %x)
ret float %result
}
@@ -4763,7 +4763,6 @@ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memo
attributes #1 = { convergent nounwind willreturn memory(none) }
attributes #2 = { "approx-func-fp-math"="true" }
attributes #3 = { "approx-func-fp-math"="true" "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
-attributes #4 = { "unsafe-fp-math"="true" }
attributes #5 = { "no-infs-fp-math"="true" }
!0 = !{float 0.5}
diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll b/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll
new file mode 100644
index 0000000..892955c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm-out-of-bounds-register.ll
@@ -0,0 +1,98 @@
+; RUN: not llc -mtriple=amdgcn-amd-amdhsa -mcpu=bonaire -filetype=null %s 2>&1 | FileCheck -implicit-check-not=error %s
+
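+; Verify that out-of-bounds, swapped, or integer-overflowing VGPR constraints
+; in inline asm outputs and inputs are diagnosed as errors instead of
+; crashing the compiler.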
+; CHECK: error: couldn't allocate output register for constraint '{v256}'
+define void @out_of_bounds_vgpr32_def() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v256}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate output register for constraint '{v[255:256]}'
+define void @out_of_bounds_vgpr64_def_high_tuple() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v[255:256]}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate output register for constraint '{v[256:257]}'
+define void @out_of_bounds_vgpr64_def_low_tuple() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 $0, -1", "={v[256:257]}"()
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v256}'
+define void @out_of_bounds_vgpr32_use() {
+ %v = tail call i32 asm sideeffect "v_mov_b32 %0, %1", "=v,{v256}"(i32 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[255:256]}'
+define void @out_of_bounds_vgpr64_high_tuple() {
+ tail call void asm sideeffect "; use %0", "{v[255:256]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[256:257]}'
+define void @out_of_bounds_vgpr64_low_tuple() {
+ tail call void asm sideeffect "; use %0", "{v[256:257]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[1:0]}'
+define void @vgpr_tuple_swapped() {
+ tail call void asm sideeffect "; use %0", "{v[1:0]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v4294967295}'
+define void @vgpr_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v4294967295}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v4294967296}'
+define void @vgpr_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v4294967296}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967295:4294967296]}'
+define void @vgpr_tuple_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[4294967295:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[0:4294967295]}'
+define void @vgpr_tuple_0_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[0:4294967295]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[0:4294967296]}'
+define void @vgpr_tuple_0_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v[0:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967264:4294967295]}'
+define void @vgpr32_last_is_uintmax() {
+ tail call void asm sideeffect "; use %0", "{v[4294967264:4294967295]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[4294967265:4294967296]}'
+define void @vgpr32_last_is_uintmax_p1() {
+ tail call void asm sideeffect "; use %0", "{v[4294967265:4294967296]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[2:2147483651]}'
+define void @overflow_bitwidth_0() {
+ tail call void asm sideeffect "; use %0", "{v[2:2147483651]}"(i64 123)
+ ret void
+}
+
+; CHECK: error: couldn't allocate input reg for constraint '{v[2147483635:2147483651]}'
+define void @overflow_bitwidth_1() {
+ tail call void asm sideeffect "; use %0", "{v[2147483635:2147483651]}"(i64 123)
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
index 43c8d83..fd51759 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
@@ -1,10 +1,188 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-TRUE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+declare i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float, float, i32, i1)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float, i32, i32, i32)
declare float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32, i32)
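+; The tests below cover the packed (cvt.pk) and stochastic-rounding (cvt.sr)
+; fp8 e5m3 conversions, including DPP sources and each byte_sel position.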
+define i32 @test_cvt_pk_fp8_f32_word0(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.l, v0, v1 clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 false)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_pk_fp8_f32_word1(float %x, float %y, i32 %old) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %x, float %y, i32 %old, i1 true)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_pk_fp8_f32_word1_dpp(i32 %a, float %y, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_pk_fp8_f32_word1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1] clamp
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32.e5m3(float %tmp1, float %y, i32 %old, i1 true)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte0(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 0)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte1(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:1 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 1)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte2(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:2 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 2)
+ ret i32 %ret
+}
+
+define i32 @test_cvt_sr_fp8_f32_byte3(float %x, i32 %r, i32 %old) {
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:3 clamp
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %x, i32 %r, i32 %old, i32 3)
+ ret i32 %ret
+}
+
+define amdgpu_cs void @test_cvt_sr_fp8_f32_byte1_dpp(i32 %a, i32 %r, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-TRUE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-TRUE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-TRUE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-TRUE16-NEXT: s_endpgm
+;
+; GFX1250-FAKE16-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-FAKE16-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_sr_fp8_f32_byte1_dpp:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-NEXT: v_cvt_sr_fp8_f32_e64_dpp v2, v0, v1 byte_sel:1 clamp quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf bound_ctrl:1
+; GFX1250-GISEL-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %tmp0 = call i32 @llvm.amdgcn.mov.dpp.i32(i32 %a, i32 228, i32 15, i32 15, i1 1)
+ %tmp1 = bitcast i32 %tmp0 to float
+ %ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32.e5m3(float %tmp1, i32 %r, i32 %old, i32 1)
+ store i32 %ret, ptr addrspace(1) %out
+ ret void
+}
+
define float @test_cvt_f32_fp8_e5m3_byte0(i32 %a) {
; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte0:
; GFX1250: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
new file mode 100644
index 0000000..6ccfad7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.f16.ll
@@ -0,0 +1,539 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-REAL16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-REAL16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL-FAKE16 %s
+
+declare i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half>)
+declare i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half>)
+declare i32 @llvm.amdgcn.cvt.sr.bf8.f16(half, i32, i32, i32)
+declare i32 @llvm.amdgcn.cvt.sr.fp8.f16(half, i32, i32, i32)
+
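+; Cover v_cvt_pk_{bf8,fp8}_f16 and v_cvt_sr_{bf8,fp8}_f16 with VGPR, SGPR,
+; and literal sources, every byte_sel position, and the high f16 half.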
+define amdgpu_ps void @test_cvt_pk_bf8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_bf8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_bf8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_bf8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_bf8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.bf8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_v(<2 x half> %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[2:3], v0
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[2:3], v0, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v0.l, v0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[4:5], v0
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_v:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v2
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v0, v0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[4:5], v0, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_s(<2 x half> inreg %a, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, s0
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_s:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, s0
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> %a)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_pk_fp8_f16_l(ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-SDAG-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_pk_fp8_f16 v2.l, 0x56400000
+; GFX1250-GISEL-REAL16-NEXT: flat_store_b16 v[0:1], v2
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_pk_fp8_f16_l:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_pk_fp8_f16 v2, 0x56400000
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b16 v[0:1], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i16 @llvm.amdgcn.cvt.pk.fp8.f16(<2 x half> <half 0.0, half 100.0>)
+ store i16 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_bf8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_bf8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_bf8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_bf8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte0(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte1(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte1:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 1)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte2(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:2
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte2:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:2
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 2)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_byte3(half %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1 byte_sel:3
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_byte3:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_mov_b32 v6, v3 :: v_dual_mov_b32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1 byte_sel:3
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a, i32 %sr, i32 %old, i32 3)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_sr_fp8_f16_hi_byte0(<2 x half> %a, i32 %sr, i32 %old, ptr addrspace(1) %out) {
+; GFX1250-SDAG-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-REAL16: ; %bb.0:
+; GFX1250-SDAG-REAL16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-SDAG-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.h, v1
+; GFX1250-SDAG-REAL16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-REAL16-NEXT: s_endpgm
+;
+; GFX1250-SDAG-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-SDAG-FAKE16: ; %bb.0:
+; GFX1250-SDAG-FAKE16-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_lshrrev_b32 v0, 16, v0
+; GFX1250-SDAG-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX1250-SDAG-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-SDAG-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-SDAG-FAKE16-NEXT: global_store_b32 v[4:5], v2, off
+; GFX1250-SDAG-FAKE16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-REAL16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-REAL16: ; %bb.0:
+; GFX1250-GISEL-REAL16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-REAL16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-REAL16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-REAL16-NEXT: v_cvt_sr_fp8_f16 v2, v0.l, v1
+; GFX1250-GISEL-REAL16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-REAL16-NEXT: s_endpgm
+;
+; GFX1250-GISEL-FAKE16-LABEL: test_cvt_sr_fp8_f16_hi_byte0:
+; GFX1250-GISEL-FAKE16: ; %bb.0:
+; GFX1250-GISEL-FAKE16-NEXT: v_dual_lshrrev_b32 v0, 16, v0 :: v_dual_mov_b32 v6, v3
+; GFX1250-GISEL-FAKE16-NEXT: v_mov_b32_e32 v7, v4
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1250-GISEL-FAKE16-NEXT: v_cvt_sr_fp8_f16 v2, v0, v1
+; GFX1250-GISEL-FAKE16-NEXT: global_store_b32 v[6:7], v2, off
+; GFX1250-GISEL-FAKE16-NEXT: s_endpgm
+ %a.1 = extractelement <2 x half> %a, i32 1
+ %cvt = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f16(half %a.1, i32 %sr, i32 %old, i32 0)
+ store i32 %cvt, ptr addrspace(1) %out
+ ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
new file mode 100644
index 0000000..2179800
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.pk.f16.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
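+; Check selection of v_cvt_sr_pk_f16_f32 with VGPR, SGPR, and immediate
+; operands, and with fneg/fabs source modifiers folded into the instruction.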
+declare <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_f16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_f16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_f16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x half> @llvm.amdgcn.cvt.sr.pk.f16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x half> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
new file mode 100644
index 0000000..4309cfbe
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scale.pk.ll
@@ -0,0 +1,164 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -global-isel-abort=2 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 %scale_sel)
+declare <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 %scale_sel)
+
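+; Exercise the scaled pk8 conversions from fp8, bf8, and fp4 sources to f16,
+; bf16, and f32 results across a range of scale_sel values.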
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-SDAG-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f16_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f16_bf8 v[4:7], v[0:1], v2
+; GFX1250-GISEL-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp8 v[4:7], v[0:1], v2 scale_sel:1
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp8(<2 x i32> %src, i32 %scale, i32 1)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_bf8_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_dual_mov_b32 v9, v4 :: v_dual_mov_b32 v8, v3
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_bf8 v[4:7], v[0:1], v2 scale_sel:2
+; GFX1250-NEXT: global_store_b128 v[8:9], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.bf8(<2 x i32> %src, i32 %scale, i32 2)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 8
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_f16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_f16_fp4 v[4:7], v0, v1 scale_sel:3
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x half> @llvm.amdgcn.cvt.scale.pk8.f16.fp4(i32 %src, i32 %scale, i32 3)
+ store <8 x half> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_bf16_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-LABEL: test_cvt_scale_pk8_bf16_fp4_vv:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_cvt_scale_pk8_bf16_fp4 v[4:7], v0, v1 scale_sel:4
+; GFX1250-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-NEXT: s_endpgm
+ %cvt = tail call <8 x bfloat> @llvm.amdgcn.cvt.scale.pk8.bf16.fp4(i32 %src, i32 %scale, i32 4)
+ store <8 x bfloat> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp8 v[4:11], v[0:1], v2 scale_sel:7
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp8(<2 x i32> %src, i32 %scale, i32 7)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_bf8_vv(<2 x i32> %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v13, v4 :: v_dual_mov_b32 v12, v3
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_bf8_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v12, v3 :: v_dual_mov_b32 v13, v4
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_bf8 v[4:11], v[0:1], v2
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[12:13], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.bf8(<2 x i32> %src, i32 %scale, i32 0)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 16
+ ret void
+}
+
+define amdgpu_ps void @test_cvt_scale_pk8_f32_fp4_vv(i32 %src, i32 %scale, ptr addrspace(1) %out) {
+; GFX1250-SDAG-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-SDAG-NEXT: s_clause 0x1
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: test_cvt_scale_pk8_f32_fp4_vv:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: v_cvt_scale_pk8_f32_fp4 v[4:11], v0, v1 scale_sel:1
+; GFX1250-GISEL-NEXT: s_clause 0x1
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[4:7], off
+; GFX1250-GISEL-NEXT: global_store_b128 v[2:3], v[8:11], off offset:16
+; GFX1250-GISEL-NEXT: s_endpgm
+ %cvt = tail call <8 x float> @llvm.amdgcn.cvt.scale.pk8.f32.fp4(i32 %src, i32 %scale, i32 1)
+ store <8 x float> %cvt, ptr addrspace(1) %out, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
index 291a4e2..217c306 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.scalef32.pk.gfx950.ll
@@ -168,7 +168,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -179,7 +179,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -213,7 +213,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -225,7 +225,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -259,7 +259,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -269,7 +269,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -300,7 +300,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -311,7 +311,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -345,7 +345,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -357,7 +357,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi(i32 %src, float %scale
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v2, v0, v1 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v2
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -391,7 +391,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -401,7 +401,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -773,7 +773,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -783,7 +783,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -895,7 +895,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -905,7 +905,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -935,7 +935,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -945,7 +945,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2(i32 %src, float %scale) {
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, v0, v1 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
@@ -1602,7 +1602,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1613,7 +1613,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.fp8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1647,7 +1647,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1659,7 +1659,7 @@ define <2 x half> @test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_fp8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_fp8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1693,7 +1693,7 @@ define float @test_cvt_scalef32_f32_fp8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1703,7 +1703,7 @@ define float @test_cvt_scalef32_f32_fp8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_fp8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_fp8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.fp8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -1734,7 +1734,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 1, i1 false)
@@ -1745,7 +1745,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_lo_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.f16.bf8(<2 x half> %old, i32 %src, float %scale, i32 2, i1 false)
@@ -1779,7 +1779,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte1_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1791,7 +1791,7 @@ define <2 x half> @test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src(i32 inreg %s
; GCN-LABEL: test_cvt_scalef32_f16_bf8_byte2_dst_hi_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[1,0,1]
+; GCN-NEXT: v_cvt_scalef32_f16_bf8 v1, s0, v0 op_sel:[0,1,1]
; GCN-NEXT: s_nop 0
; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: s_setpc_b64 s[30:31]
@@ -1825,7 +1825,7 @@ define float @test_cvt_scalef32_f32_bf8_byte1_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 1)
ret float %ret
@@ -1835,7 +1835,7 @@ define float @test_cvt_scalef32_f32_bf8_byte2_inreg_src(i32 inreg %src, float %s
; GCN-LABEL: test_cvt_scalef32_f32_bf8_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_f32_bf8 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.scalef32.f32.bf8(i32 %src, float %scale, i32 2)
ret float %ret
@@ -2032,7 +2032,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 1)
ret <2 x float> %ret
@@ -2042,7 +2042,7 @@ define <2 x float> @test_cvt_scale_f32_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f32_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f32_fp4 v[0:1], s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.scalef32.pk.f32.fp4(i32 %src, float %scale, i32 2)
ret <2 x float> %ret
@@ -2112,7 +2112,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte1_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 1)
ret <2 x half> %ret
@@ -2122,7 +2122,7 @@ define <2 x half> @test_cvt_scale_f16_fp4_byte2_inreg_src(i32 inreg %src, float
; GCN-LABEL: test_cvt_scale_f16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_f16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x half> @llvm.amdgcn.cvt.scalef32.pk.f16.fp4(i32 %src, float %scale, i32 2)
ret <2 x half> %ret
@@ -2152,7 +2152,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte1_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte1_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 1)
ret <2 x bfloat> %ret
@@ -2162,7 +2162,7 @@ define <2 x bfloat> @test_cvt_scale_bf16_fp4_byte2_inreg_src(i32 inreg %src, flo
; GCN-LABEL: test_cvt_scale_bf16_fp4_byte2_inreg_src:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[1,0,0]
+; GCN-NEXT: v_cvt_scalef32_pk_bf16_fp4 v0, s0, v0 op_sel:[0,1,0]
; GCN-NEXT: s_setpc_b64 s[30:31]
%ret = tail call <2 x bfloat> @llvm.amdgcn.cvt.scalef32.pk.bf16.fp4(i32 %src, float %scale, i32 2)
ret <2 x bfloat> %ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
new file mode 100644
index 0000000..82991ae
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.sr.pk.bf16.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+; xUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: GlobalISel does not yet support bf16, so the -global-isel=1 RUN line above is disabled (xUN).
+
+declare <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float, float, i32) #0
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvv(float %src0, float %src1, i32 %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, v2
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_sss(float inreg %src0, float inreg %src1, i32 inreg %src2) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_sss:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, v0
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 %src2) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, v0, v1, 0x10002
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 65538) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_vvi_mods(float %src0, float %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_vvi_mods:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, -v0, |v1|, 1
+; GCN-NEXT: ; return to shader part epilog
+ %s0 = fneg float %src0
+ %s1 = call float @llvm.fabs.f32(float %src1) #0
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %s0, float %s1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+define amdgpu_ps float @cvt_sr_pk_bf16_f32_ssi(float inreg %src0, float inreg %src1) #1 {
+; GCN-LABEL: cvt_sr_pk_bf16_f32_ssi:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_cvt_sr_pk_bf16_f32 v0, s0, s1, 1
+; GCN-NEXT: ; return to shader part epilog
+ %cvt = call <2 x bfloat> @llvm.amdgcn.cvt.sr.pk.bf16.f32(float %src0, float %src1, i32 1) #0
+ %ret = bitcast <2 x bfloat> %cvt to float
+ ret float %ret
+}
+
+declare float @llvm.fabs.f32(float) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
index 425a853..477f0a6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.ll
@@ -51,7 +51,7 @@ define amdgpu_kernel void @safe_no_fp32_denormals_rcp_f32(ptr addrspace(1) %out,
; SI-NOT: [[RESULT]]
; SI: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @safe_f32_denormals_rcp_pat_f32(ptr addrspace(1) %out, float %src) #4 {
- %rcp = fdiv float 1.0, %src, !fpmath !0
+ %rcp = fdiv afn float 1.0, %src, !fpmath !0
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -105,8 +105,8 @@ define amdgpu_kernel void @safe_rsq_rcp_pat_amdgcn_sqrt_f32_nocontract(ptr addrs
; SI: v_sqrt_f32_e32
; SI: v_rcp_f32_e32
define amdgpu_kernel void @unsafe_rsq_rcp_pat_f32(ptr addrspace(1) %out, float %src) #2 {
- %sqrt = call float @llvm.sqrt.f32(float %src)
- %rcp = call float @llvm.amdgcn.rcp.f32(float %sqrt)
+ %sqrt = call afn float @llvm.sqrt.f32(float %src)
+ %rcp = call afn float @llvm.amdgcn.rcp.f32(float %sqrt)
store float %rcp, ptr addrspace(1) %out, align 4
ret void
}
@@ -148,7 +148,7 @@ define amdgpu_kernel void @rcp_pat_f64(ptr addrspace(1) %out, double %src) #1 {
; SI: v_fma_f64
; SI: v_fma_f64
define amdgpu_kernel void @unsafe_rcp_pat_f64(ptr addrspace(1) %out, double %src) #2 {
- %rcp = fdiv double 1.0, %src
+ %rcp = fdiv afn double 1.0, %src
store double %rcp, ptr addrspace(1) %out, align 8
ret void
}
@@ -214,9 +214,9 @@ define amdgpu_kernel void @unsafe_amdgcn_sqrt_rsq_rcp_pat_f64(ptr addrspace(1) %
}
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #3 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #1 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index 8c1e166..7151fee 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -3227,72 +3227,6 @@ define float @v_exp_f32_fast(float %in) {
ret float %result
}
-define float @v_exp_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc2aeac50
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42800000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp.f32(float %in)
- ret float %result
-}
-
define float @v_exp_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index edc505b..918b1b2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -3235,78 +3235,6 @@ define float @v_exp10_f32_fast(float %in) {
ret float %result
}
-define float @v_exp10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; GCN-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-SDAG: ; %bb.0:
-; GCN-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; GCN-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; GCN-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; GCN-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; GCN-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; GCN-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; GCN-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; GCN-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GCN-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; GCN-GISEL: ; %bb.0:
-; GCN-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; GCN-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; GCN-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; GCN-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; GCN-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; GCN-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SDAG-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0xc217b818
-; SI-SDAG-NEXT: v_add_f32_e32 v1, 0x42000000, v0
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3a2784bc, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x40549000, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mul_f32_e32 v0, v0, v1
-; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0xa4fb11f, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_exp10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0xc2aeac50
-; SI-GISEL-NEXT: v_add_f32_e32 v2, 0x42800000, v0
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
-; SI-GISEL-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
-; SI-GISEL-NEXT: v_exp_f32_e32 v0, v0
-; SI-GISEL-NEXT: v_mul_f32_e32 v1, 0x114b4ea4, v0
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_exp10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_exp10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.exp10.f32(float %in)
- ret float %result
-}
-
define float @v_exp10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; GCN-SDAG-LABEL: v_exp10_f32_approx_fn_attr:
; GCN-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index 38d1b47..307fa89 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -3076,121 +3076,6 @@ define float @v_log_f32_fast(float %in) {
ret float %result
}
-define float @v_log_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3f317218, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc1b17218
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3f317218
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc1b17218
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3f317218
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc1b17218, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3f317218, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc1b17218, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3f317218, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log.f32(float %in)
- ret float %result
-}
-
define float @v_log_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 058933f..5278589 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -3076,121 +3076,6 @@ define float @v_log10_f32_fast(float %in) {
ret float %result
}
-define float @v_log10_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
-; SI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; SI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; SI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v2
-; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; SI-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; SI-GISEL: ; %bb.0:
-; SI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; SI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; SI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; SI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; SI-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; SI-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; SI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-SDAG: ; %bb.0:
-; VI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; VI-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; VI-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; VI-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; VI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; VI-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; VI-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; VI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
-; VI-SDAG-NEXT: v_add_f32_e32 v0, v0, v1
-; VI-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; VI-GISEL: ; %bb.0:
-; VI-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-GISEL-NEXT: v_log_f32_e32 v2, v0
-; VI-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; VI-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; VI-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; VI-GISEL-NEXT: v_mul_f32_e32 v1, 0x3e9a209b, v2
-; VI-GISEL-NEXT: v_add_f32_e32 v0, v1, v0
-; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-SDAG: ; %bb.0:
-; GFX900-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x800000
-; GFX900-SDAG-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
-; GFX900-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc
-; GFX900-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX900-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX900-SDAG-NEXT: v_mov_b32_e32 v1, 0xc11a209b
-; GFX900-SDAG-NEXT: v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX900-SDAG-NEXT: s_mov_b32 s4, 0x3e9a209b
-; GFX900-SDAG-NEXT: v_fma_f32 v0, v0, s4, v1
-; GFX900-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX900-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX900-GISEL: ; %bb.0:
-; GFX900-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-GISEL-NEXT: v_log_f32_e32 v2, v0
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x800000
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v3, 0xc11a209b
-; GFX900-GISEL-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
-; GFX900-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v3, vcc
-; GFX900-GISEL-NEXT: v_mov_b32_e32 v1, 0x3e9a209b
-; GFX900-GISEL-NEXT: v_fma_f32 v0, v2, v1, v0
-; GFX900-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-SDAG-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-SDAG: ; %bb.0:
-; GFX1100-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-SDAG-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v2, 0, 32, vcc_lo
-; GFX1100-SDAG-NEXT: v_cndmask_b32_e64 v1, 0, 0xc11a209b, vcc_lo
-; GFX1100-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-SDAG-NEXT: v_ldexp_f32 v0, v0, v2
-; GFX1100-SDAG-NEXT: v_log_f32_e32 v0, v0
-; GFX1100-SDAG-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-SDAG-NEXT: v_fmamk_f32 v0, v0, 0x3e9a209b, v1
-; GFX1100-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX1100-GISEL-LABEL: v_log10_f32_unsafe_math_attr:
-; GFX1100-GISEL: ; %bb.0:
-; GFX1100-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX1100-GISEL-NEXT: v_log_f32_e32 v1, v0
-; GFX1100-GISEL-NEXT: v_cmp_gt_f32_e32 vcc_lo, 0x800000, v0
-; GFX1100-GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 0xc11a209b, vcc_lo
-; GFX1100-GISEL-NEXT: s_waitcnt_depctr 0xfff
-; GFX1100-GISEL-NEXT: v_fmac_f32_e32 v0, 0x3e9a209b, v1
-; GFX1100-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; R600-LABEL: v_log10_f32_unsafe_math_attr:
-; R600: ; %bb.0:
-; R600-NEXT: CF_END
-; R600-NEXT: PAD
-;
-; CM-LABEL: v_log10_f32_unsafe_math_attr:
-; CM: ; %bb.0:
-; CM-NEXT: CF_END
-; CM-NEXT: PAD
- %result = call float @llvm.log10.f32(float %in)
- ret float %result
-}
-
define float @v_log10_f32_approx_fn_attr(float %in) "approx-func-fp-math"="true" {
; SI-SDAG-LABEL: v_log10_f32_approx_fn_attr:
; SI-SDAG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
index 1e6b77e..702a69f 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-prefetch-data.ll
@@ -77,7 +77,7 @@ define amdgpu_kernel void @copy_flat(ptr nocapture %d, ptr nocapture readonly %s
; GFX1250-NEXT: s_add_nc_u64 s[2:3], s[2:3], 16
; GFX1250-NEXT: s_cmp_lg_u32 s6, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1]
+; GFX1250-NEXT: flat_store_b128 v0, v[2:5], s[0:1] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[0:1], 16
; GFX1250-NEXT: s_cbranch_scc1 .LBB0_2
@@ -400,9 +400,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -438,9 +438,9 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
@@ -490,7 +490,7 @@ define amdgpu_kernel void @copy_flat_divergent(ptr nocapture %d, ptr nocapture r
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7]
+; GFX1250-NEXT: flat_store_b128 v[0:1], v[4:7] scope:SCOPE_SE
; GFX1250-NEXT: s_wait_xcnt 0x0
; GFX1250-NEXT: v_add_nc_u64_e32 v[0:1], 16, v[0:1]
; GFX1250-NEXT: s_cbranch_scc1 .LBB4_2
@@ -531,9 +531,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-NEXT: s_wait_alu 0xf1ff
@@ -569,9 +569,9 @@ define amdgpu_kernel void @copy_global_divergent(ptr addrspace(1) nocapture %d,
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX12-SPREFETCH-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; GFX12-SPREFETCH-NEXT: s_wait_kmcnt 0x0
-; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, s6, v0
+; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, s1, v0, s6
; GFX12-SPREFETCH-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, s7, 0, s1
+; GFX12-SPREFETCH-NEXT: v_add_co_ci_u32_e64 v3, null, 0, s7, s1
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v0, s1, s4, v0
; GFX12-SPREFETCH-NEXT: v_add_co_u32 v2, vcc_lo, 0xb0, v2
; GFX12-SPREFETCH-NEXT: s_wait_alu 0xf1ff
diff --git a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
index be02045..4c0ab91 100644
--- a/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
+++ b/llvm/test/CodeGen/AMDGPU/memintrinsic-unroll.ll
@@ -6982,7 +6982,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; CHECK-NEXT: global_store_dwordx4 v[100:101], v[96:99], off offset:16
; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0x800
; CHECK-NEXT: s_cbranch_scc1 .LBB6_2
-; CHECK-NEXT: .LBB6_3: ; %Flow9
+; CHECK-NEXT: .LBB6_3: ; %Flow7
; CHECK-NEXT: s_andn2_saveexec_b32 s8, s6
; CHECK-NEXT: s_cbranch_execz .LBB6_6
; CHECK-NEXT: ; %bb.4: ; %memmove_bwd_loop.preheader
@@ -7048,7 +7048,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; CHECK-NEXT: global_store_dwordx4 v[100:101], v[96:99], off offset:16
; CHECK-NEXT: s_cmp_eq_u64 s[4:5], s[6:7]
; CHECK-NEXT: s_cbranch_scc0 .LBB6_5
-; CHECK-NEXT: .LBB6_6: ; %Flow10
+; CHECK-NEXT: .LBB6_6: ; %Flow8
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_setpc_b64 s[30:31]
;
@@ -7689,7 +7689,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; ALIGNED-NEXT: global_store_byte v[16:17], v11, off offset:3
; ALIGNED-NEXT: global_store_byte v[16:17], v4, off offset:1
; ALIGNED-NEXT: s_cbranch_scc1 .LBB6_2
-; ALIGNED-NEXT: .LBB6_3: ; %Flow9
+; ALIGNED-NEXT: .LBB6_3: ; %Flow7
; ALIGNED-NEXT: s_andn2_saveexec_b32 s8, s6
; ALIGNED-NEXT: s_cbranch_execz .LBB6_6
; ALIGNED-NEXT: ; %bb.4: ; %memmove_bwd_loop.preheader
@@ -8316,7 +8316,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; ALIGNED-NEXT: global_store_byte v[16:17], v11, off offset:3
; ALIGNED-NEXT: global_store_byte v[16:17], v4, off offset:1
; ALIGNED-NEXT: s_cbranch_scc0 .LBB6_5
-; ALIGNED-NEXT: .LBB6_6: ; %Flow10
+; ALIGNED-NEXT: .LBB6_6: ; %Flow8
; ALIGNED-NEXT: s_or_b32 exec_lo, exec_lo, s8
; ALIGNED-NEXT: s_clause 0x7
; ALIGNED-NEXT: buffer_load_dword v47, off, s[0:3], s32
@@ -8369,7 +8369,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; UNROLL3-NEXT: global_store_dwordx4 v[0:1], v[2:5], off offset:2032
; UNROLL3-NEXT: ; implicit-def: $vgpr2_vgpr3
; UNROLL3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; UNROLL3-NEXT: .LBB6_4: ; %Flow7
+; UNROLL3-NEXT: .LBB6_4: ; %Flow5
; UNROLL3-NEXT: s_andn2_saveexec_b32 s8, s6
; UNROLL3-NEXT: s_cbranch_execz .LBB6_7
; UNROLL3-NEXT: ; %bb.5: ; %memmove_bwd_residual
@@ -8403,7 +8403,7 @@ define void @memmove_p1_p1_sz2048(ptr addrspace(1) align 1 %dst, ptr addrspace(1
; UNROLL3-NEXT: global_store_dwordx4 v[16:17], v[12:15], off offset:32
; UNROLL3-NEXT: s_cmp_eq_u64 s[4:5], s[6:7]
; UNROLL3-NEXT: s_cbranch_scc0 .LBB6_6
-; UNROLL3-NEXT: .LBB6_7: ; %Flow8
+; UNROLL3-NEXT: .LBB6_7: ; %Flow6
; UNROLL3-NEXT: s_or_b32 exec_lo, exec_lo, s8
; UNROLL3-NEXT: s_setpc_b64 s[30:31]
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
index 272daa9..dd5c247 100644
--- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
@@ -460,10 +460,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB3_3
-; CHECK-NEXT: ; %bb.1: ; %Flow34
+; CHECK-NEXT: ; %bb.1: ; %Flow36
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_10
-; CHECK-NEXT: .LBB3_2: ; %Flow35
+; CHECK-NEXT: .LBB3_2: ; %Flow37
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -494,7 +494,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB3_5
-; CHECK-NEXT: .LBB3_6: ; %Flow29
+; CHECK-NEXT: .LBB3_6: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB3_9
@@ -520,7 +520,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB3_8
-; CHECK-NEXT: .LBB3_9: ; %Flow27
+; CHECK-NEXT: .LBB3_9: ; %Flow29
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
@@ -556,7 +556,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s5
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_cbranch_execnz .LBB3_12
-; CHECK-NEXT: .LBB3_13: ; %Flow33
+; CHECK-NEXT: .LBB3_13: ; %Flow35
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB3_16
@@ -584,7 +584,7 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_15
-; CHECK-NEXT: .LBB3_16: ; %Flow31
+; CHECK-NEXT: .LBB3_16: ; %Flow33
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s5
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
@@ -907,10 +907,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB6_3
-; CHECK-NEXT: ; %bb.1: ; %Flow41
+; CHECK-NEXT: ; %bb.1: ; %Flow39
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_10
-; CHECK-NEXT: .LBB6_2: ; %Flow42
+; CHECK-NEXT: .LBB6_2: ; %Flow40
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_setpc_b64 s[30:31]
; CHECK-NEXT: .LBB6_3: ; %memmove_copy_forward
@@ -940,7 +940,7 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v11, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB6_5
-; CHECK-NEXT: .LBB6_6: ; %Flow36
+; CHECK-NEXT: .LBB6_6: ; %Flow34
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB6_9
@@ -966,11 +966,11 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, s6
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s9
; CHECK-NEXT: s_cbranch_execnz .LBB6_8
-; CHECK-NEXT: .LBB6_9: ; %Flow34
+; CHECK-NEXT: .LBB6_9: ; %Flow32
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
@@ -1002,15 +1002,15 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s5
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: s_cbranch_execnz .LBB6_12
-; CHECK-NEXT: .LBB6_13: ; %Flow40
+; CHECK-NEXT: .LBB6_13: ; %Flow38
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB6_16
; CHECK-NEXT: ; %bb.14: ; %memmove_bwd_main_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: v_add_co_u32 v0, vcc_lo, v0, -16
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_15: ; %memmove_bwd_main_loop
@@ -1030,7 +1030,7 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_15
-; CHECK-NEXT: .LBB6_16: ; %Flow38
+; CHECK-NEXT: .LBB6_16: ; %Flow36
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s5
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s6
; CHECK-NEXT: s_setpc_b64 s[30:31]
@@ -1181,8 +1181,8 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .LBB8_9: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
@@ -1219,10 +1219,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_and_saveexec_b32 s5, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB8_16
; CHECK-NEXT: ; %bb.14: ; %memmove_bwd_main_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: v_add_co_u32 v0, vcc_lo, v0, -16
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, -1, v1, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v2, vcc_lo, v2, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, -1, v3, vcc_lo
; CHECK-NEXT: s_mov_b32 s7, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_15: ; %memmove_bwd_main_loop
diff --git a/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll b/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll
new file mode 100644
index 0000000..c4a48a46
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/postra-sched-attribute.ll
@@ -0,0 +1,34 @@
+; REQUIRES: asserts
+
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -debug-only=gcn-subtarget < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 < %s 2>&1 | FileCheck -check-prefixes=WARNING %s
+
+; CHECK: Post-MI-sched direction (postra-sched-topdown): topdown
+define float @postra-sched-topdown(float %input) nounwind #0 {
+ %x = fadd float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-bottomup): bottomup
+define float @postra-sched-bottomup(float %input) nounwind #1 {
+ %x = fsub float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-bidirectional): bidirectional
+define float @postra-sched-bidirectional(float %input) nounwind #2 {
+ %x = fadd float %input, 1.000000e+00
+ ret float %x
+}
+
+; CHECK: Post-MI-sched direction (postra-sched-warning): topdown
+; WARNING: invalid value for postRA direction attribute
+define float @postra-sched-warning(float %input) nounwind #3 {
+ %x = fsub float %input, 1.000000e+00
+ ret float %x
+}
+
+attributes #0 = {"amdgpu-post-ra-direction"="topdown"}
+attributes #1 = {"amdgpu-post-ra-direction"="bottomup"}
+attributes #2 = {"amdgpu-post-ra-direction"="bidirectional"}
+attributes #3 = {"amdgpu-post-ra-direction"="warning"}
diff --git a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
index 228420e..9f0ffbc 100644
--- a/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
+++ b/llvm/test/CodeGen/AMDGPU/rcp-pattern.ll
@@ -56,7 +56,7 @@ define float @v_rcp_f32_ieee_unsafe(float %x) #4 {
; R600: ; %bb.0:
; R600-NEXT: CF_END
; R600-NEXT: PAD
- %rcp = fdiv float 1.0, %x
+ %rcp = fdiv afn float 1.0, %x
ret float %rcp
}
@@ -1411,10 +1411,10 @@ define amdgpu_kernel void @s_div_arcp_neg_k_x_pat_f32_daz(ptr addrspace(1) %out)
declare float @llvm.fabs.f32(float) #1
declare float @llvm.sqrt.f32(float) #1
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
+attributes #2 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #3 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
-attributes #4 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="ieee,ieee" }
+attributes #4 = { nounwind "denormal-fp-math-f32"="ieee,ieee" }
!0 = !{float 2.500000e+00}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
index f7e0388..f967e95 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f32.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,SI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,SI-IEEE-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefixes=GCN-DAZ,GCN-DAZ-UNSAFE,CI-DAZ-UNSAFE %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=hawaii -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefixes=GCN-IEEE,GCN-IEEE-UNSAFE,CI-IEEE-UNSAFE %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
@@ -65,8 +65,8 @@ define amdgpu_kernel void @rsq_f32(ptr addrspace(1) noalias %out, ptr addrspace(
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -103,8 +103,8 @@ define amdgpu_kernel void @rsq_f32_sgpr(ptr addrspace(1) noalias %out, float %va
; GCN-UNSAFE-NEXT: s_mov_b32 s2, -1
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-UNSAFE-NEXT: s_endpgm
- %sqrt = call contract float @llvm.sqrt.f32(float %val) nounwind readnone
- %div = fdiv contract float 1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val) nounwind readnone
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -196,7 +196,7 @@ define amdgpu_kernel void @rsqrt_fmul(ptr addrspace(1) %out, ptr addrspace(1) %i
%x = call contract float @llvm.sqrt.f32(float %a)
%y = fmul contract float %x, %b
- %z = fdiv arcp contract float %c, %y
+ %z = fdiv arcp afn contract float %c, %y
store float %z, ptr addrspace(1) %out.gep
ret void
}
@@ -258,8 +258,8 @@ define amdgpu_kernel void @neg_rsq_f32(ptr addrspace(1) noalias %out, ptr addrsp
; GCN-UNSAFE-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -322,8 +322,8 @@ define amdgpu_kernel void @neg_rsq_neg_f32(ptr addrspace(1) noalias %out, ptr ad
; GCN-UNSAFE-NEXT: s_endpgm
%val = load float, ptr addrspace(1) %in, align 4
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
store float %div, ptr addrspace(1) %out, align 4
ret void
}
@@ -343,8 +343,8 @@ define float @v_neg_rsq_neg_f32(float %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg float %val
- %sqrt = call contract float @llvm.sqrt.f32(float %val.fneg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val.fneg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -367,8 +367,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val.fneg = fneg <2 x float> %val
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -387,8 +387,8 @@ define float @v_neg_rsq_neg_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.neg = fneg float %val0
- %sqrt = call contract float @llvm.sqrt.f32(float %val0.neg)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0.neg)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -412,8 +412,8 @@ define <2 x float> @v_neg_rsq_neg_v2f32_foldable_user(<2 x float> %val0, <2 x fl
; GCN-IEEE-NEXT: v_mul_f32_e64 v1, -v1, v3
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
%val0.fneg = fneg <2 x float> %val0
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0.fneg)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -432,8 +432,8 @@ define float @v_neg_rsq_f32(float %val) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
ret float %div
}
@@ -455,8 +455,8 @@ define <2 x float> @v_neg_rsq_v2f32(<2 x float> %val) {
; GCN-IEEE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
; GCN-IEEE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
ret <2 x float> %div
}
@@ -474,8 +474,8 @@ define float @v_neg_rsq_f32_foldable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_mul_f32_e64 v0, -v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0)
- %div = fdiv contract float -1.0, %sqrt, !fpmath !0
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0)
+ %div = fdiv afn contract float -1.0, %sqrt, !fpmath !0
%user = fmul contract float %div, %val1
ret float %user
}
@@ -643,8 +643,8 @@ define <2 x float> @v_neg_rsq_v2f32_foldable_user(<2 x float> %val0, <2 x float>
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v0, v0, v2
; CI-IEEE-SAFE-NEXT: v_mul_f32_e32 v1, v1, v3
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
- %div = fdiv contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
+ %sqrt = call afn contract <2 x float> @llvm.sqrt.v2f32(<2 x float> %val0)
+ %div = fdiv afn contract <2 x float> <float -1.0, float -1.0>, %sqrt, !fpmath !0
%user = fmul contract <2 x float> %div, %val1
ret <2 x float> %user
}
@@ -672,8 +672,8 @@ define float @v_rsq_f32(float %val) {
; GCN-IEEE-SAFE-NEXT: v_cndmask_b32_e64 v1, 0, 12, vcc
; GCN-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v0, v1
; GCN-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -756,9 +756,9 @@ define { float, float } @v_rsq_f32_multi_use(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v2, vcc, 0, v2
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v1, v1, v2
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
%insert.0 = insertvalue { float, float } poison, float %sqrt, 0
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
%insert.1 = insertvalue { float, float } %insert.0, float %div, 1
ret { float, float } %insert.1
}
@@ -838,8 +838,8 @@ define float @v_rsq_f32_missing_contract0(float %val) {
; CI-IEEE-SAFE-NEXT: v_sub_i32_e32 v0, vcc, 0, v0
; CI-IEEE-SAFE-NEXT: v_ldexp_f32_e32 v0, v1, v0
; CI-IEEE-SAFE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -855,8 +855,8 @@ define float @v_rsq_f32_missing_contract1(float %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv arcp float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv arcp afn float 1.0, %sqrt, !fpmath !1
ret float %div
}
@@ -876,8 +876,8 @@ define float @v_rsq_f32_contractable_user(float %val0, float %val1) {
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -897,8 +897,8 @@ define float @v_rsq_f32_contractable_user_missing_contract0(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd contract float %div, %val1
ret float %add
}
@@ -918,8 +918,8 @@ define float @v_rsq_f32_contractable_user_missing_contract1(float %val0, float %
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: v_add_f32_e32 v0, v0, v1
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val0), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val0), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
%add = fadd float %div, %val1
ret float %add
}
@@ -953,8 +953,8 @@ define float @v_rsq_f32_known_never_posdenormal(float nofpclass(psub) %val) {
; GCN-IEEE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-IEEE-NEXT: v_rsq_f32_e32 v0, v0
; GCN-IEEE-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call contract float @llvm.sqrt.f32(float %val), !fpmath !1
- %div = fdiv contract float 1.0, %sqrt, !fpmath !1
+ %sqrt = call afn contract float @llvm.sqrt.f32(float %val), !fpmath !1
+ %div = fdiv afn contract float 1.0, %sqrt, !fpmath !1
ret float %div
}
diff --git a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
index b78cbb0..4aac193 100644
--- a/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/rsq.f64.ll
@@ -4504,7 +4504,7 @@ define <2 x double> @v_rsq_v2f64__afn_nnan_ninf(<2 x double> %x) {
ret <2 x double> %rsq
}
-define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
+define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) {
; SI-SDAG-LABEL: s_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0
@@ -4648,8 +4648,8 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
; VI-GISEL-NEXT: v_readfirstlane_b32 s0, v0
; VI-GISEL-NEXT: v_readfirstlane_b32 s1, v1
; VI-GISEL-NEXT: ; return to shader part epilog
- %rsq = call contract double @llvm.sqrt.f64(double %x)
- %result = fdiv contract double 1.0, %rsq
+ %rsq = call contract afn double @llvm.sqrt.f64(double %x)
+ %result = fdiv contract afn double 1.0, %rsq
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
@@ -4660,7 +4660,7 @@ define amdgpu_ps <2 x i32> @s_rsq_f64_unsafe(double inreg %x) #0 {
ret <2 x i32> %insert.1
}
-define double @v_rsq_f64_unsafe(double %x) #0 {
+define double @v_rsq_f64_unsafe(double %x) {
; SI-SDAG-LABEL: v_rsq_f64_unsafe:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4800,8 +4800,8 @@ define double @v_rsq_f64_unsafe(double %x) #0 {
; VI-GISEL-NEXT: v_fma_f64 v[0:1], -v[0:1], v[2:3], 1.0
; VI-GISEL-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[2:3]
; VI-GISEL-NEXT: s_setpc_b64 s[30:31]
- %sqrt = call double @llvm.sqrt.f64(double %x)
- %rsq = fdiv double 1.0, %sqrt
+ %sqrt = call afn contract double @llvm.sqrt.f64(double %x)
+ %rsq = fdiv afn contract double 1.0, %sqrt
ret double %rsq
}
@@ -5737,7 +5737,6 @@ define double @v_div_const_contract_sqrt_f64(double %x) {
ret double %rsq
}
-attributes #0 = { "unsafe-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN: {{.*}}
; GISEL: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/udivrem24.ll b/llvm/test/CodeGen/AMDGPU/udivrem24.ll
index 5477d62..1e5ec59 100644
--- a/llvm/test/CodeGen/AMDGPU/udivrem24.ll
+++ b/llvm/test/CodeGen/AMDGPU/udivrem24.ll
@@ -1,18 +1,103 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s
-; FUNC-LABEL: {{^}}udiv24_i8:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i8:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -21,17 +106,101 @@ define amdgpu_kernel void @udiv24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_in_out:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_in_out(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; SI-LABEL: udiv24_i8_denorm_flush_in_out:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_in_out:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_in_out:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -40,17 +209,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_in_out(ptr addrspace(1) %out,
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_in:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_in(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+; SI-LABEL: udiv24_i8_denorm_flush_in:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_in:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_in:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -59,17 +312,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_in(ptr addrspace(1) %out, ptr
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i8_denorm_flush_out:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i8_denorm_flush_out(ptr addrspace(1) %out, ptr addrspace(1) %in) #2 {
+; SI-LABEL: udiv24_i8_denorm_flush_out:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i8_denorm_flush_out:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v0, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v1, v1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i8_denorm_flush_out:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -78,17 +415,101 @@ define amdgpu_kernel void @udiv24_i8_denorm_flush_out(ptr addrspace(1) %out, ptr
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i16:
-; SI: v_cvt_f32_u32
-; SI: v_cvt_f32_u32
-; SI: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv24_i16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ushort v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0 offset:2
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_u32_e32 v0, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_u32_e32 v1, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
+; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_u32_e32 v0, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_u32_e32 v1, v1
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i16:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 23, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 2, #1
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.X, T0.X,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: ADD_INT * T1.W, PS, PV.W,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i16, ptr addrspace(1) %in, i16 1
%num = load i16, ptr addrspace(1) %in, align 2
%den = load i16, ptr addrspace(1) %den_ptr, align 2
@@ -97,17 +518,85 @@ define amdgpu_kernel void @udiv24_i16(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}udiv23_i32:
-; SI: v_cvt_f32_u32
-; SI-DAG: v_cvt_f32_u32
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @udiv23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -120,11 +609,88 @@ define amdgpu_kernel void @udiv23_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}udiv24_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @udiv24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -137,11 +703,88 @@ define amdgpu_kernel void @udiv24_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}no_udiv24_u23_u24_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @no_udiv24_u23_u24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: no_udiv24_u23_u24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: no_udiv24_u23_u24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: no_udiv24_u23_u24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -154,11 +797,88 @@ define amdgpu_kernel void @no_udiv24_u23_u24_i32(ptr addrspace(1) %out, ptr addr
ret void
}
-; FUNC-LABEL: {{^}}no_udiv24_u24_u23_i32:
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @no_udiv24_u24_u23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: no_udiv24_u24_u23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: no_udiv24_u24_u23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: no_udiv24_u24_u23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -171,14 +891,113 @@ define amdgpu_kernel void @no_udiv24_u24_u23_i32(ptr addrspace(1) %out, ptr addr
ret void
}
-; FUNC-LABEL: {{^}}udiv25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @udiv25_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: udiv25_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: udiv25_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: udiv25_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -191,14 +1010,113 @@ define amdgpu_kernel void @udiv25_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}test_no_udiv24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_udiv24_i32_1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_udiv24_i32_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffffff
+; SI-NEXT: s_and_b32 s5, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_udiv24_i32_1:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0xffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_udiv24_i32_1:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -211,14 +1129,113 @@ define amdgpu_kernel void @test_no_udiv24_i32_1(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_no_udiv24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_udiv24_i32_2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_udiv24_i32_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s6, 0, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s6, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: s_cselect_b32 s4, s6, s4
+; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
+; SI-NEXT: s_cmp_ge_u32 s4, s5
+; SI-NEXT: s_cselect_b64 vcc, -1, 0
+; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_udiv24_i32_2:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b64 vcc, -1, 0
+; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_udiv24_i32_2:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 21, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: ADD_INT T0.Z, T0.X, 1,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T1.W, PV.W, T1.W, PS,
+; EG-NEXT: CNDE_INT * T2.W, PV.W, T0.X, PV.Z,
+; EG-NEXT: ADD_INT T3.W, PS, 1,
+; EG-NEXT: SETGE_UINT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PS, T2.W, PV.W,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -231,17 +1248,107 @@ define amdgpu_kernel void @test_no_udiv24_i32_2(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}urem24_i8:
-; SI: v_cvt_f32_ubyte
-; SI-DAG: v_cvt_f32_ubyte
-; SI-DAG: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @urem24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i8:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0 offset:1
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_ubyte0_e32 v3, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v4, v3
+; SI-NEXT: v_mul_f32_e32 v4, v2, v4
+; SI-NEXT: v_trunc_f32_e32 v4, v4
+; SI-NEXT: v_fma_f32 v2, -v4, v3, v2
+; SI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v2|, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
+; SI-NEXT: v_mul_lo_u32 v1, v2, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i8:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ubyte v0, off, s[8:11], 0 offset:1
+; VI-NEXT: buffer_load_ubyte v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v2, v0
+; VI-NEXT: v_rcp_iflag_f32_e32 v3, v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_ubyte0_e32 v4, v1
+; VI-NEXT: v_mul_f32_e32 v3, v4, v3
+; VI-NEXT: v_trunc_f32_e32 v3, v3
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v3
+; VI-NEXT: v_mad_f32 v3, -v3, v2, v4
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v2
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v2, v0
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i8:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 25, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 1, #1
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.W, T0.X,
+; EG-NEXT: MUL_IEEE * T1.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: MULADD_IEEE T0.W, -PV.W, T0.Y, T0.W,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: SETGE * T0.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.Y, T1.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.X,
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT * T1.W, T0.X, PS,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i8, ptr addrspace(1) %in, i8 1
%num = load i8, ptr addrspace(1) %in
%den = load i8, ptr addrspace(1) %den_ptr
@@ -250,17 +1357,107 @@ define amdgpu_kernel void @urem24_i8(ptr addrspace(1) %out, ptr addrspace(1) %in
ret void
}
-; FUNC-LABEL: {{^}}urem24_i16:
-; SI: v_cvt_f32_u32
-; SI: v_cvt_f32_u32
-; SI: v_rcp_iflag_f32
-; SI: v_cvt_u32_f32
-
-; EG: UINT_TO_FLT
-; EG-DAG: UINT_TO_FLT
-; EG-DAG: RECIP_IEEE
-; EG: FLT_TO_UINT
define amdgpu_kernel void @urem24_i16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s10, s6
+; SI-NEXT: s_mov_b32 s11, s7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_ushort v0, off, s[8:11], 0
+; SI-NEXT: buffer_load_ushort v1, off, s[8:11], 0 offset:2
+; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_u32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; SI-NEXT: v_rcp_iflag_f32_e32 v4, v3
+; SI-NEXT: v_mul_f32_e32 v4, v2, v4
+; SI-NEXT: v_trunc_f32_e32 v4, v4
+; SI-NEXT: v_fma_f32 v2, -v4, v3, v2
+; SI-NEXT: v_cvt_u32_f32_e32 v4, v4
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v2|, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v4, vcc
+; SI-NEXT: v_mul_lo_u32 v1, v2, v1
+; SI-NEXT: v_subrev_i32_e32 v0, vcc, v1, v0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s10, s6
+; VI-NEXT: s_mov_b32 s11, s7
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_mov_b32 s8, s2
+; VI-NEXT: s_mov_b32 s9, s3
+; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 offset:2
+; VI-NEXT: buffer_load_ushort v1, off, s[8:11], 0
+; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_cvt_f32_u32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cvt_f32_u32_e32 v3, v1
+; VI-NEXT: v_rcp_iflag_f32_e32 v4, v2
+; VI-NEXT: v_mul_f32_e32 v4, v3, v4
+; VI-NEXT: v_trunc_f32_e32 v4, v4
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v4
+; VI-NEXT: v_mad_f32 v3, -v4, v2, v3
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, v2
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v2, v0
+; VI-NEXT: v_subrev_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i16:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @6
+; EG-NEXT: ALU 25, @11, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 2, #1
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 11:
+; EG-NEXT: UINT_TO_FLT * T0.Y, T1.X,
+; EG-NEXT: RECIP_IEEE * T0.Z, PS,
+; EG-NEXT: UINT_TO_FLT * T0.W, T0.X,
+; EG-NEXT: MUL_IEEE * T1.W, PS, T0.Z,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: MULADD_IEEE T0.W, -PV.W, T0.Y, T0.W,
+; EG-NEXT: TRUNC * T1.W, PV.W,
+; EG-NEXT: SETGE * T0.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.Y, T1.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.X,
+; EG-NEXT: AND_INT T0.W, KC0[2].Y, literal.x,
+; EG-NEXT: SUB_INT * T1.W, T0.X, PS,
+; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.W, PS, literal.x,
+; EG-NEXT: LSHL * T0.W, PV.W, literal.y,
+; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i16, ptr addrspace(1) %in, i16 1
%num = load i16, ptr addrspace(1) %in, align 2
%den = load i16, ptr addrspace(1) %den_ptr, align 2
@@ -269,10 +1466,90 @@ define amdgpu_kernel void @urem24_i16(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}urem24_i32:
-; SI-NOT: v_rcp_f32
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @urem24_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem24_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s6, s4, 0xffffff
+; SI-NEXT: s_and_b32 s7, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s6
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s7
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_mul_lo_u32 v0, v0, s5
+; SI-NEXT: v_sub_i32_e32 v0, vcc, s4, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem24_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s2, s5, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s2
+; VI-NEXT: s_and_b32 s2, s4, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_mul_lo_u32 v0, v0, s5
+; VI-NEXT: v_sub_u32_e32 v0, vcc, s4, v0
+; VI-NEXT: v_and_b32_e32 v0, 0xffffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem24_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -285,14 +1562,105 @@ define amdgpu_kernel void @urem24_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}urem25_i32:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @urem25_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: urem25_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s4, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: urem25_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: urem25_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -305,14 +1673,105 @@ define amdgpu_kernel void @urem25_i32(ptr addrspace(1) %out, ptr addrspace(1) %i
ret void
}
-; FUNC-LABEL: {{^}}test_no_urem24_i32_1:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_urem24_i32_1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_urem24_i32_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0xffffff
+; SI-NEXT: s_and_b32 s4, s5, 0x1ffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_urem24_i32_1:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0x1ffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0xffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_urem24_i32_1:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -325,14 +1784,105 @@ define amdgpu_kernel void @test_no_urem24_i32_1(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_no_urem24_i32_2:
; RCP_IFLAG is for URECIP in the full 32b alg
-; SI: v_rcp_iflag
-; SI-NOT: v_rcp_f32
-
-; EG-NOT: UINT_TO_FLT
-; EG-NOT: RECIP_IEEE
define amdgpu_kernel void @test_no_urem24_i32_2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_no_urem24_i32_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s2, s4, 0x1ffffff
+; SI-NEXT: s_and_b32 s4, s5, 0xffffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: s_sub_i32 s5, 0, s4
+; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; SI-NEXT: v_mul_lo_u32 v1, s5, v0
+; SI-NEXT: v_mul_hi_u32 v1, v0, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; SI-NEXT: v_mul_hi_u32 v0, s2, v0
+; SI-NEXT: v_readfirstlane_b32 s5, v0
+; SI-NEXT: s_mul_i32 s5, s5, s4
+; SI-NEXT: s_sub_i32 s2, s2, s5
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s2, s5, s2
+; SI-NEXT: s_sub_i32 s5, s2, s4
+; SI-NEXT: s_cmp_ge_u32 s2, s4
+; SI-NEXT: s_cselect_b32 s4, s5, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_no_urem24_i32_2:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s4, s3, 0xffffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT: s_sub_i32 s3, 0, s4
+; VI-NEXT: s_and_b32 s5, s2, 0x1ffffff
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
+; VI-NEXT: v_mul_lo_u32 v1, s3, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_mul_hi_u32 v1, v0, v1
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; VI-NEXT: v_mul_hi_u32 v0, s5, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s4
+; VI-NEXT: s_sub_i32 s5, s5, s6
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s5, s6, s5
+; VI-NEXT: s_sub_i32 s6, s5, s4
+; VI-NEXT: s_cmp_ge_u32 s5, s4
+; VI-NEXT: s_cselect_b32 s4, s6, s5
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_no_urem24_i32_2:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 19, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: SUB_INT T1.W, 0.0, PV.W,
+; EG-NEXT: RECIP_UINT * T0.Y, PV.W,
+; EG-NEXT: MULLO_INT * T0.Z, PV.W, PS,
+; EG-NEXT: MULHI * T0.Z, T0.Y, PS,
+; EG-NEXT: ADD_INT T1.W, T0.Y, PS,
+; EG-NEXT: AND_INT * T2.W, T0.X, literal.x,
+; EG-NEXT: 33554431(9.403954e-38), 0(0.000000e+00)
+; EG-NEXT: MULHI * T0.X, PS, PV.W,
+; EG-NEXT: MULLO_INT * T0.X, PS, T0.W,
+; EG-NEXT: SUB_INT * T1.W, T2.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T3.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT * T1.W, PV.W, T1.W, PS,
+; EG-NEXT: SETGE_UINT T2.W, PV.W, T0.W,
+; EG-NEXT: SUB_INT * T0.W, PV.W, T0.W,
+; EG-NEXT: CNDE_INT T0.X, PV.W, T1.W, PS,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -345,12 +1895,85 @@ define amdgpu_kernel void @test_no_urem24_i32_2(ptr addrspace(1) %out, ptr addrs
ret void
}
-; FUNC-LABEL: {{^}}test_udiv24_u16_u23_i32:
-; SI: v_rcp_iflag_f32
-; SI: v_and_b32_e32 v{{[0-9]+}}, 0x7fffff,
-
-; EG: RECIP_IEEE
define amdgpu_kernel void @test_udiv24_u16_u23_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_udiv24_u16_u23_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
+; SI-NEXT: s_and_b32 s5, s5, 0x7fffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_udiv24_u16_u23_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0xffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_udiv24_u16_u23_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
@@ -363,12 +1986,85 @@ define amdgpu_kernel void @test_udiv24_u16_u23_i32(ptr addrspace(1) %out, ptr ad
ret void
}
-; FUNC-LABEL: {{^}}test_udiv24_u23_u16_i32:
-; SI: v_rcp_iflag_f32
-; SI: v_and_b32_e32 v{{[0-9]+}}, 0x7fffff,
-
-; EG: RECIP_IEEE
define amdgpu_kernel void @test_udiv24_u23_u16_i32(ptr addrspace(1) %out, ptr addrspace(1) %in) {
+; SI-LABEL: test_udiv24_u23_u16_i32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_and_b32 s4, s4, 0x7fffff
+; SI-NEXT: s_and_b32 s5, s5, 0xffff
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s4
+; SI-NEXT: v_cvt_f32_u32_e32 v1, s5
+; SI-NEXT: v_rcp_iflag_f32_e32 v2, v1
+; SI-NEXT: v_mul_f32_e32 v2, v0, v2
+; SI-NEXT: v_trunc_f32_e32 v2, v2
+; SI-NEXT: v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT: v_cvt_u32_f32_e32 v2, v2
+; SI-NEXT: v_cmp_ge_f32_e64 vcc, |v0|, v1
+; SI-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; SI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: test_udiv24_u23_u16_i32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_and_b32 s3, s3, 0xffff
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
+; VI-NEXT: s_and_b32 s2, s2, 0x7fffff
+; VI-NEXT: v_cvt_f32_u32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mul_f32_e32 v2, v1, v2
+; VI-NEXT: v_trunc_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT: v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT: v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; VI-NEXT: v_and_b32_e32 v0, 0x7fffff, v0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; EG-LABEL: test_udiv24_u23_u16_i32:
+; EG: ; %bb.0:
+; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @6
+; EG-NEXT: ALU 18, @9, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
+; EG-NEXT: ALU clause starting at 8:
+; EG-NEXT: MOV * T0.X, KC0[2].Z,
+; EG-NEXT: ALU clause starting at 9:
+; EG-NEXT: AND_INT * T0.W, T0.Y, literal.x,
+; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Y, PV.W,
+; EG-NEXT: AND_INT T0.W, T0.X, literal.x,
+; EG-NEXT: RECIP_IEEE * T0.X, PS,
+; EG-NEXT: 8388607(1.175494e-38), 0(0.000000e+00)
+; EG-NEXT: UINT_TO_FLT * T0.Z, PV.W,
+; EG-NEXT: MUL_IEEE * T0.W, PS, T0.X,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: MULADD_IEEE T1.W, -PV.W, T0.Y, T0.Z,
+; EG-NEXT: TRUNC * T0.W, PV.W,
+; EG-NEXT: SETGE * T1.W, |PV.W|, T0.Y,
+; EG-NEXT: CNDE T1.W, PV.W, 0.0, literal.x,
+; EG-NEXT: FLT_TO_UINT * T0.X, T0.W,
+; EG-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: AND_INT T0.X, PV.W, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8388607(1.175494e-38), 2(2.802597e-45)
%den_ptr = getelementptr i32, ptr addrspace(1) %in, i32 1
%num = load i32, ptr addrspace(1) %in, align 4
%den = load i32, ptr addrspace(1) %den_ptr, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll b/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll
new file mode 100644
index 0000000..ea12732
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/use-after-free-after-cleanup-failed-vreg.ll
@@ -0,0 +1,16 @@
+; RUN: not llc -mcpu=gfx1100 -mtriple=amdgcn-amd-amdhsa -stress-regalloc=4 -filetype=null -verify-machineinstrs %s 2>&1 | FileCheck %s
+
+; CHECK: error: <unknown>:0:0: ran out of registers during register allocation in function 'f'
+; CHECK-NOT: Bad machine code
+
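+;; -stress-regalloc=4 artificially caps the registers available to the
+;; allocator, forcing the out-of-registers path; the test only requires that
+;; the failure is diagnosed cleanly and that -verify-machineinstrs reports no
+;; "Bad machine code" while cleaning up the failed vreg.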
+define <16 x half> @f(i1 %LGV2, <16 x half> %0) {
+BB:
+ br i1 %LGV2, label %SW_C3, label %SW_C
+
+SW_C: ; preds = %BB
+ %B1 = fmul <16 x half> %0, zeroinitializer
+ ret <16 x half> %B1
+
+SW_C3: ; preds = %BB
+ ret <16 x half> <half 0xH0000, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison, half poison>
+}
diff --git a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
index aea2a8b..f2ecfe8 100644
--- a/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_ashr_pk.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 < %s | FileCheck -check-prefixes=GFX950 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250 %s
define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
; GFX950-LABEL: v_ashr_pk_i8_i32:
; GFX950: ; %bb.0:
@@ -13,6 +14,20 @@ define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_i8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_i8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_i8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
@@ -40,6 +55,20 @@ define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i3
; GFX950-NEXT: v_ashr_pk_u8_i32 v1, s0, v1, v2
; GFX950-NEXT: global_store_short v0, v1, s[6:7]
; GFX950-NEXT: s_endpgm
+;
+; GFX1250-LABEL: v_ashr_pk_u8_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_load_b96 s[0:2], s[4:5], 0x2c
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_load_b64 s[4:5], s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_and_b32 s2, s2, 31
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, s2
+; GFX1250-NEXT: v_ashr_pk_u8_i32 v0, s0, s1, v0
+; GFX1250-NEXT: global_store_b16 v1, v0, s[4:5]
+; GFX1250-NEXT: s_endpgm
%insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
%build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
%src2.clamp = and i32 %src2, 31
diff --git a/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..8f7b050
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
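+
+;; For reference: each !callee_type operand wraps a single type-id node of the
+;; form !{i64 0, !"<mangled>.generalized"}. "_ZTSFiiiE" is the Itanium
+;; typeinfo name for "int (int, int)" and "_ZTSFicE" for "int (char)", which
+;; is why !3 carries the deliberately mismatched signature.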
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..05e1e8b
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
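+
+;; The expected id 3498816979441845844 is the 64-bit value the backend derives
+;; by hashing the type string "_ZTSFicE.generalized"; the exact hashing scheme
+;; is an implementation detail of --call-graph-section, so this constant must
+;; be regenerated if that scheme ever changes.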
diff --git a/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..a65e5c5
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple arm-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/ARM/nop_concat_vectors.ll b/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
index cda1e83..aa3cdc3 100644
--- a/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
+++ b/llvm/test/CodeGen/ARM/nop_concat_vectors.ll
@@ -1,10 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
-;CHECK: _foo
-;CHECK-NOT: vld1.32
-;CHECK-NOT: vst1.32
-;CHECK: bx
define void @foo(ptr %J) {
+; CHECK-LABEL: foo:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: bx lr
%A = load <16 x i8>, ptr %J
%T1 = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%T2 = shufflevector <8 x i8> %T1, <8 x i8> undef, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
index 0f711630..4a65a53 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-DescriptorTable-Invalid-RangeType.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Descriptor Range type: Invalid
+; CHECK: error: Invalid Descriptor Range type
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
index 6551116..031dfca 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-Flags-Error.ll
@@ -2,7 +2,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: NOTRootFlags
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
index 579528d..2739320 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-RootDescriptor-Invalid-RegisterKind.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid Root Signature Element: Invalid
+; CHECK: error: Invalid Root Signature Element
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
entry:
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
index 7e7d56e..855e0c0 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MaxLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MaxLOD: 0
+; CHECK: error: Invalid value for MaxLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
index d958f10..812749b 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLod.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MinLOD: 0
+; CHECK: error: Invalid value for MinLOD: nan
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
index 34b27eb..6898aec 100644
--- a/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
+++ b/llvm/test/CodeGen/DirectX/ContainerData/RootSignature-StaticSamplers-Invalid-MinLopBias.ll
@@ -3,7 +3,7 @@
target triple = "dxil-unknown-shadermodel6.0-compute"
-; CHECK: error: Invalid value for MipLODBias: 666
+; CHECK: error: Invalid value for MipLODBias: 6.660000e+02
; CHECK-NOT: Root Signature Definitions
define void @main() #0 {
diff --git a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
index b23366b..f5430df 100644
--- a/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
+++ b/llvm/test/CodeGen/Hexagon/hexagon-strcpy.ll
@@ -1,20 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon -verify-machineinstrs < %s | FileCheck %s
@.str = private unnamed_addr constant [31 x i8] c"DHRYSTONE PROGRAM, 3'RD STRING\00", align 1
@.str1 = private unnamed_addr constant [3 x i8] c"%s\00", align 1
-; Function Attrs: nounwind
declare i32 @printf(i8* nocapture readonly, ...)
; Function Attrs: nounwind
-define i32 @main() {
+define i32 @main() nounwind {
; CHECK-LABEL: main:
-; CHECK: .cfi_startproc
-; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: .cfi_def_cfa r30, 8
-; CHECK-NEXT: .cfi_offset r31, -4
-; CHECK-NEXT: .cfi_offset r30, -8
+; CHECK: // %bb.0: // %entry
; CHECK-NEXT: {
; CHECK-NEXT: r0 = ##.L.str1
; CHECK-NEXT: r3:2 = CONST64(#2325073635944967245)
@@ -53,5 +48,4 @@ entry:
ret i32 0
}
-; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i32, i1)
diff --git a/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..3f7590a
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,91 @@
+;; Test the MIR printer and parser for the type id field in call site info,
+;; and verify that it works both with and without --emit-call-site-info.
+
+;; Exercise --call-graph-section and -emit-call-site-info separately and in
+;; combination, since both populate CallSiteInfo and the callSites field.
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with --call-graph-section only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is not set, calleeTypeIds is set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section %s -stop-after=finalize-isel -o %t1.mir
+; RUN: cat %t1.mir | FileCheck %s --check-prefix=PRINTER_CGS
+; PRINTER_CGS: name: main
+; PRINTER_CGS: callSites:
+; PRINTER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PRINTER_CGS-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section %t1.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS
+; PARSER_CGS: name: main
+; PARSER_CGS: callSites:
+; PARSER_CGS-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; PARSER_CGS-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with -emit-call-site-info only.
+
+;; Test printer.
+;; Verify that fwdArgRegs is set, calleeTypeIds is not set.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CSI
+; PRINTER_CSI: name: main
+; PRINTER_CSI: callSites:
+; PRINTER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PRINTER_CSI-NOT: calleeTypeIds:
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CSI
+; PARSER_CSI: name: main
+; PARSER_CSI: callSites:
+; PARSER_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CSI-NEXT: { arg: 0, reg: {{.*}} }
+; PARSER_CSI-NOT: calleeTypeIds:
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Test printer and parser with both -emit-call-site-info and --call-graph-section.
+
+;; Test printer.
+;; Verify both fwdArgRegs and calleeTypeIds are set.
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %s -stop-after=finalize-isel -o %t2.mir
+; RUN: cat %t2.mir | FileCheck %s --check-prefix=PRINTER_CGS_CSI
+; PRINTER_CGS_CSI: name: main
+; PRINTER_CGS_CSI: callSites:
+; PRINTER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PRINTER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PRINTER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+
+;; Test parser.
+;; Verify that we get the same result.
+; RUN: llc -mtriple=x86_64 --call-graph-section -emit-call-site-info %t2.mir -run-pass=finalize-isel -o - \
+; RUN: | FileCheck %s --check-prefix=PARSER_CGS_CSI
+; PARSER_CGS_CSI: name: main
+; PARSER_CGS_CSI: callSites:
+; PARSER_CGS_CSI-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs:
+; PARSER_CGS_CSI-NEXT: { arg: 0, reg: {{.*}} }, calleeTypeIds:
+; PARSER_CGS_CSI-NEXT: [ 7854600665770582568 ] }
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..a66a884
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..e7f162c
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeId value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..9f5e858
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=mips-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/NVPTX/fold-movs.ll b/llvm/test/CodeGen/NVPTX/fold-movs.ll
new file mode 100644
index 0000000..6ee0fb2
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fold-movs.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck %s --check-prefixes=CHECK-F32X2
+; RUN: %if ptxas-12.7 %{ \
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O3 -disable-post-ra \
+; RUN: -frame-pointer=all -verify-machineinstrs | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+target triple = "nvptx64-nvidia-cuda"
+
+; Since fdiv doesn't support f32x2, this will create BUILD_VECTORs that will be
+; folded into the store, turning it into st.global.v8.b32.
+define void @writevec(<8 x float> %v1, <8 x float> %v2, ptr addrspace(1) %p) {
+; CHECK-F32X2-LABEL: writevec(
+; CHECK-F32X2: {
+; CHECK-F32X2-NEXT: .reg .b32 %r<25>;
+; CHECK-F32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT: // %bb.0:
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [writevec_param_0];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [writevec_param_0+16];
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r9, %r10, %r11, %r12}, [writevec_param_1+16];
+; CHECK-F32X2-NEXT: div.rn.f32 %r13, %r8, %r12;
+; CHECK-F32X2-NEXT: div.rn.f32 %r14, %r7, %r11;
+; CHECK-F32X2-NEXT: div.rn.f32 %r15, %r6, %r10;
+; CHECK-F32X2-NEXT: div.rn.f32 %r16, %r5, %r9;
+; CHECK-F32X2-NEXT: ld.param.v4.b32 {%r17, %r18, %r19, %r20}, [writevec_param_1];
+; CHECK-F32X2-NEXT: div.rn.f32 %r21, %r4, %r20;
+; CHECK-F32X2-NEXT: div.rn.f32 %r22, %r3, %r19;
+; CHECK-F32X2-NEXT: div.rn.f32 %r23, %r2, %r18;
+; CHECK-F32X2-NEXT: div.rn.f32 %r24, %r1, %r17;
+; CHECK-F32X2-NEXT: ld.param.b64 %rd1, [writevec_param_2];
+; CHECK-F32X2-NEXT: st.global.v8.b32 [%rd1], {%r24, %r23, %r22, %r21, %r16, %r15, %r14, %r13};
+; CHECK-F32X2-NEXT: ret;
+ %v = fdiv <8 x float> %v1, %v2
+ store <8 x float> %v, ptr addrspace(1) %p, align 32
+ ret void
+}
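+
+;; The key check is the single st.global.v8.b32: all eight div.rn.f32 results
+;; feed one vectorized store, confirming the BUILD_VECTORs were folded into
+;; the store instead of being materialized through extra mov instructions.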
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 06c2cc8..26336b8 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -343,61 +343,77 @@ define <4 x i8> @test_smax(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smax(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smax_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smax_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smax(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smax_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smax_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.gt.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.gt.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.gt.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.gt.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sgt <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
@@ -473,61 +489,77 @@ define <4 x i8> @test_smin(<4 x i8> %a, <4 x i8> %b) #0 {
; O0-LABEL: test_smin(
; O0: {
; O0-NEXT: .reg .pred %p<5>;
-; O0-NEXT: .reg .b32 %r<18>;
+; O0-NEXT: .reg .b32 %r<26>;
; O0-EMPTY:
; O0-NEXT: // %bb.0:
; O0-NEXT: ld.param.b32 %r2, [test_smin_param_1];
; O0-NEXT: ld.param.b32 %r1, [test_smin_param_0];
-; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O0-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O0-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O0-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O0-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O0-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O0-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O0-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O0-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O0-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O0-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O0-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O0-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O0-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O0-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O0-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O0-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O0-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O0-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O0-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O0-NEXT: st.param.b32 [func_retval0], %r17;
+; O0-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O0-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O0-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O0-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O0-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O0-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O0-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O0-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O0-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O0-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O0-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O0-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O0-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O0-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O0-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O0-NEXT: st.param.b32 [func_retval0], %r25;
; O0-NEXT: ret;
;
; O3-LABEL: test_smin(
; O3: {
; O3-NEXT: .reg .pred %p<5>;
-; O3-NEXT: .reg .b32 %r<18>;
+; O3-NEXT: .reg .b32 %r<26>;
; O3-EMPTY:
; O3-NEXT: // %bb.0:
; O3-NEXT: ld.param.b32 %r1, [test_smin_param_0];
; O3-NEXT: ld.param.b32 %r2, [test_smin_param_1];
-; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x7770U;
-; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r3, %r2, 0, 0x8880U;
+; O3-NEXT: prmt.b32 %r4, %r1, 0, 0x8880U;
; O3-NEXT: setp.le.s32 %p1, %r4, %r3;
-; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x7771U;
-; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r5, %r2, 0, 0x9991U;
+; O3-NEXT: prmt.b32 %r6, %r1, 0, 0x9991U;
; O3-NEXT: setp.le.s32 %p2, %r6, %r5;
-; O3-NEXT: prmt.b32 %r7, %r2, 0, 0x7772U;
-; O3-NEXT: prmt.b32 %r8, %r1, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r7, %r2, 0, 0xaaa2U;
+; O3-NEXT: prmt.b32 %r8, %r1, 0, 0xaaa2U;
; O3-NEXT: setp.le.s32 %p3, %r8, %r7;
-; O3-NEXT: prmt.b32 %r9, %r2, 0, 0x7773U;
-; O3-NEXT: prmt.b32 %r10, %r1, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r9, %r2, 0, 0xbbb3U;
+; O3-NEXT: prmt.b32 %r10, %r1, 0, 0xbbb3U;
; O3-NEXT: setp.le.s32 %p4, %r10, %r9;
-; O3-NEXT: selp.b32 %r11, %r10, %r9, %p4;
-; O3-NEXT: selp.b32 %r12, %r8, %r7, %p3;
-; O3-NEXT: prmt.b32 %r13, %r12, %r11, 0x3340U;
-; O3-NEXT: selp.b32 %r14, %r6, %r5, %p2;
-; O3-NEXT: selp.b32 %r15, %r4, %r3, %p1;
-; O3-NEXT: prmt.b32 %r16, %r15, %r14, 0x3340U;
-; O3-NEXT: prmt.b32 %r17, %r16, %r13, 0x5410U;
-; O3-NEXT: st.param.b32 [func_retval0], %r17;
+; O3-NEXT: prmt.b32 %r11, %r2, 0, 0x7770U;
+; O3-NEXT: prmt.b32 %r12, %r2, 0, 0x7771U;
+; O3-NEXT: prmt.b32 %r13, %r2, 0, 0x7772U;
+; O3-NEXT: prmt.b32 %r14, %r2, 0, 0x7773U;
+; O3-NEXT: prmt.b32 %r15, %r1, 0, 0x7773U;
+; O3-NEXT: selp.b32 %r16, %r15, %r14, %p4;
+; O3-NEXT: prmt.b32 %r17, %r1, 0, 0x7772U;
+; O3-NEXT: selp.b32 %r18, %r17, %r13, %p3;
+; O3-NEXT: prmt.b32 %r19, %r18, %r16, 0x3340U;
+; O3-NEXT: prmt.b32 %r20, %r1, 0, 0x7771U;
+; O3-NEXT: selp.b32 %r21, %r20, %r12, %p2;
+; O3-NEXT: prmt.b32 %r22, %r1, 0, 0x7770U;
+; O3-NEXT: selp.b32 %r23, %r22, %r11, %p1;
+; O3-NEXT: prmt.b32 %r24, %r23, %r21, 0x3340U;
+; O3-NEXT: prmt.b32 %r25, %r24, %r19, 0x5410U;
+; O3-NEXT: st.param.b32 [func_retval0], %r25;
; O3-NEXT: ret;
%cmp = icmp sle <4 x i8> %a, %b
%r = select <4 x i1> %cmp, <4 x i8> %a, <4 x i8> %b
diff --git a/llvm/test/CodeGen/NVPTX/ld-param-sink.ll b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
new file mode 100644
index 0000000..03523a3
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/ld-param-sink.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -verify-machineinstrs | FileCheck %s
+; RUN: %if ptxas %{ llc < %s | %ptxas-verify %}
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare ptr @bar(i64)
+declare i64 @baz()
+
+define ptr @foo(i1 %cond) {
+; CHECK-LABEL: foo(
+; CHECK: {
+; CHECK-NEXT: .reg .pred %p<2>;
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.b8 %rs1, [foo_param_0];
+; CHECK-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT: { // callseq 0, 0
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: call.uni (retval0), baz, ();
+; CHECK-NEXT: ld.param.b64 %rd2, [retval0];
+; CHECK-NEXT: } // callseq 0
+; CHECK-NEXT: @%p1 bra $L__BB0_2;
+; CHECK-NEXT: // %bb.1: // %bb
+; CHECK-NEXT: { // callseq 1, 0
+; CHECK-NEXT: .param .b64 param0;
+; CHECK-NEXT: .param .b64 retval0;
+; CHECK-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-NEXT: call.uni (retval0), bar, (param0);
+; CHECK-NEXT: } // callseq 1
+; CHECK-NEXT: $L__BB0_2: // %common.ret
+; CHECK-NEXT: st.param.b64 [func_retval0], 0;
+; CHECK-NEXT: ret;
+entry:
+ %call = call i64 @baz()
+ br i1 %cond, label %common.ret, label %bb
+
+bb:
+ %tmp = call ptr @bar(i64 %call)
+ br label %common.ret
+
+common.ret:
+ ret ptr null
+}
diff --git a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
index 9f62477..af0942e 100644
--- a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
+++ b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll
@@ -56,155 +56,153 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: .cfi_offset v29, -240
; CHECK-NEXT: .cfi_offset v30, -224
; CHECK-NEXT: .cfi_offset v31, -208
+; CHECK-NEXT: std 14, 400(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 15, 408(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 2, 728(1)
+; CHECK-NEXT: ld 14, 688(1)
+; CHECK-NEXT: ld 11, 704(1)
+; CHECK-NEXT: std 20, 448(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 21, 456(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 21, 5
+; CHECK-NEXT: lwa 5, 0(7)
+; CHECK-NEXT: ld 7, 720(1)
; CHECK-NEXT: std 22, 464(1) # 8-byte Folded Spill
; CHECK-NEXT: std 23, 472(1) # 8-byte Folded Spill
-; CHECK-NEXT: mr 22, 5
-; CHECK-NEXT: ld 5, 848(1)
+; CHECK-NEXT: mr 22, 6
+; CHECK-NEXT: ld 6, 848(1)
; CHECK-NEXT: addi 3, 3, 1
-; CHECK-NEXT: mr 11, 7
-; CHECK-NEXT: ld 23, 688(1)
-; CHECK-NEXT: ld 7, 728(1)
+; CHECK-NEXT: ld 15, 736(1)
; CHECK-NEXT: std 18, 432(1) # 8-byte Folded Spill
; CHECK-NEXT: std 19, 440(1) # 8-byte Folded Spill
-; CHECK-NEXT: mr 18, 6
-; CHECK-NEXT: li 6, 9
; CHECK-NEXT: ld 19, 768(1)
-; CHECK-NEXT: ld 2, 760(1)
-; CHECK-NEXT: std 26, 496(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 27, 504(1) # 8-byte Folded Spill
-; CHECK-NEXT: cmpldi 3, 9
-; CHECK-NEXT: ld 27, 816(1)
-; CHECK-NEXT: ld 26, 808(1)
-; CHECK-NEXT: std 14, 400(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 15, 408(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 15, 736(1)
-; CHECK-NEXT: lxv 39, 0(8)
+; CHECK-NEXT: ld 18, 760(1)
; CHECK-NEXT: std 30, 528(1) # 8-byte Folded Spill
; CHECK-NEXT: std 31, 536(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 30, 704(1)
-; CHECK-NEXT: lxv 38, 0(9)
-; CHECK-NEXT: std 20, 448(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 21, 456(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 21, 784(1)
+; CHECK-NEXT: ld 12, 696(1)
+; CHECK-NEXT: lxv 0, 0(9)
+; CHECK-NEXT: std 9, 64(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 10, 72(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 1, 0(8)
+; CHECK-NEXT: cmpldi 3, 9
+; CHECK-NEXT: ld 30, 824(1)
+; CHECK-NEXT: std 28, 512(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 29, 520(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 29, 840(1)
+; CHECK-NEXT: ld 28, 832(1)
+; CHECK-NEXT: std 16, 416(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 17, 424(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 23, 784(1)
; CHECK-NEXT: ld 20, 776(1)
; CHECK-NEXT: std 24, 480(1) # 8-byte Folded Spill
; CHECK-NEXT: std 25, 488(1) # 8-byte Folded Spill
-; CHECK-NEXT: iselgt 3, 3, 6
-; CHECK-NEXT: ld 6, 720(1)
+; CHECK-NEXT: ld 25, 800(1)
; CHECK-NEXT: ld 24, 792(1)
-; CHECK-NEXT: std 10, 72(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 7, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 26, 496(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 27, 504(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 27, 816(1)
+; CHECK-NEXT: ld 26, 808(1)
+; CHECK-NEXT: stfd 26, 544(1) # 8-byte Folded Spill
+; CHECK-NEXT: stfd 27, 552(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 17, 752(1)
+; CHECK-NEXT: extswsli 9, 5, 3
+; CHECK-NEXT: lxv 4, 0(14)
+; CHECK-NEXT: std 14, 32(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 12, 40(1) # 8-byte Folded Spill
+; CHECK-NEXT: mulli 0, 5, 40
+; CHECK-NEXT: sldi 14, 5, 5
+; CHECK-NEXT: mulli 31, 5, 24
+; CHECK-NEXT: lxv 38, 0(2)
+; CHECK-NEXT: lxv 2, 0(11)
+; CHECK-NEXT: std 2, 80(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 15, 88(1) # 8-byte Folded Spill
+; CHECK-NEXT: mulli 2, 5, 48
+; CHECK-NEXT: sldi 5, 5, 4
+; CHECK-NEXT: ld 16, 744(1)
+; CHECK-NEXT: lxv 5, 0(10)
+; CHECK-NEXT: std 6, 200(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 29, 192(1) # 8-byte Folded Spill
+; CHECK-NEXT: ld 6, 712(1)
+; CHECK-NEXT: mr 10, 7
+; CHECK-NEXT: add 7, 14, 21
+; CHECK-NEXT: lxv 13, 0(19)
+; CHECK-NEXT: std 8, 48(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 6, 56(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 8, 11
+; CHECK-NEXT: li 11, 9
+; CHECK-NEXT: iselgt 3, 3, 11
; CHECK-NEXT: addi 3, 3, -2
-; CHECK-NEXT: lxv 6, 0(19)
-; CHECK-NEXT: lxv 11, 0(7)
-; CHECK-NEXT: std 5, 200(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 23, 40(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 6, 48(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 840(1)
-; CHECK-NEXT: lxv 12, 0(6)
-; CHECK-NEXT: rldicl 12, 3, 61, 3
+; CHECK-NEXT: rldicl 11, 3, 61, 3
+; CHECK-NEXT: lxv 3, 0(12)
+; CHECK-NEXT: lxv 40, 0(6)
+; CHECK-NEXT: std 18, 112(1) # 8-byte Folded Spill
; CHECK-NEXT: std 19, 120(1) # 8-byte Folded Spill
+; CHECK-NEXT: add 19, 21, 5
+; CHECK-NEXT: ld 5, 200(1) # 8-byte Folded Reload
+; CHECK-NEXT: lxv 39, 0(10)
+; CHECK-NEXT: addi 3, 7, 32
+; CHECK-NEXT: add 12, 31, 21
; CHECK-NEXT: std 20, 128(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 21, 136(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 24, 144(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 4, 0(21)
-; CHECK-NEXT: ld 25, 800(1)
-; CHECK-NEXT: lxv 33, 0(10)
-; CHECK-NEXT: lxv 32, 0(23)
-; CHECK-NEXT: lxv 36, 0(30)
-; CHECK-NEXT: std 16, 416(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 17, 424(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 17, 752(1)
-; CHECK-NEXT: ld 16, 744(1)
-; CHECK-NEXT: std 28, 512(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 29, 520(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 29, 712(1)
-; CHECK-NEXT: ld 28, 696(1)
-; CHECK-NEXT: std 8, 56(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 9, 64(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 37, 0(28)
-; CHECK-NEXT: lxv 13, 0(29)
-; CHECK-NEXT: mr 8, 29
-; CHECK-NEXT: mr 9, 30
-; CHECK-NEXT: mr 10, 28
-; CHECK-NEXT: std 25, 152(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 23, 136(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 33, 0(15)
+; CHECK-NEXT: lxv 32, 0(16)
; CHECK-NEXT: std 26, 160(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 10, 0(15)
-; CHECK-NEXT: lxv 9, 0(16)
-; CHECK-NEXT: li 28, 1
-; CHECK-NEXT: stfd 26, 544(1) # 8-byte Folded Spill
-; CHECK-NEXT: stfd 27, 552(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 8, 0(17)
-; CHECK-NEXT: lxv 7, 0(2)
+; CHECK-NEXT: std 27, 168(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 37, 0(17)
+; CHECK-NEXT: lxv 36, 0(18)
+; CHECK-NEXT: std 30, 176(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 28, 184(1) # 8-byte Folded Spill
+; CHECK-NEXT: lxv 12, 0(20)
+; CHECK-NEXT: lxv 11, 0(23)
+; CHECK-NEXT: add 20, 21, 9
; CHECK-NEXT: stfd 28, 560(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 29, 568(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 5, 0(20)
-; CHECK-NEXT: lxv 3, 0(24)
+; CHECK-NEXT: lxv 10, 0(24)
+; CHECK-NEXT: lxv 9, 0(25)
; CHECK-NEXT: stfd 30, 576(1) # 8-byte Folded Spill
; CHECK-NEXT: stfd 31, 584(1) # 8-byte Folded Spill
-; CHECK-NEXT: lxv 2, 0(25)
-; CHECK-NEXT: lxv 1, 0(26)
+; CHECK-NEXT: lxv 8, 0(26)
+; CHECK-NEXT: lxv 7, 0(27)
+; CHECK-NEXT: addi 12, 12, 32
+; CHECK-NEXT: li 27, 0
+; CHECK-NEXT: mr 26, 21
; CHECK-NEXT: stxv 52, 208(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 53, 224(1) # 16-byte Folded Spill
-; CHECK-NEXT: lxv 0, 0(27)
+; CHECK-NEXT: lxv 6, 0(30)
+; CHECK-NEXT: lxv 41, 0(28)
+; CHECK-NEXT: addi 7, 11, 1
+; CHECK-NEXT: add 11, 0, 21
+; CHECK-NEXT: li 28, 1
; CHECK-NEXT: stxv 54, 240(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 55, 256(1) # 16-byte Folded Spill
+; CHECK-NEXT: lxv 43, 0(29)
+; CHECK-NEXT: lxv 42, 0(5)
; CHECK-NEXT: stxv 56, 272(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 57, 288(1) # 16-byte Folded Spill
+; CHECK-NEXT: addi 11, 11, 32
; CHECK-NEXT: stxv 58, 304(1) # 16-byte Folded Spill
-; CHECK-NEXT: std 5, 192(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 832(1)
; CHECK-NEXT: stxv 59, 320(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 60, 336(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 61, 352(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 62, 368(1) # 16-byte Folded Spill
; CHECK-NEXT: stxv 63, 384(1) # 16-byte Folded Spill
-; CHECK-NEXT: std 15, 88(1) # 8-byte Folded Spill
; CHECK-NEXT: std 16, 96(1) # 8-byte Folded Spill
; CHECK-NEXT: std 17, 104(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 2, 112(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 5, 184(1) # 8-byte Folded Spill
-; CHECK-NEXT: ld 5, 824(1)
-; CHECK-NEXT: std 5, 176(1) # 8-byte Folded Spill
-; CHECK-NEXT: std 27, 168(1) # 8-byte Folded Spill
-; CHECK-NEXT: lwa 5, 0(11)
-; CHECK-NEXT: li 27, 0
-; CHECK-NEXT: ld 7, 176(1) # 8-byte Folded Reload
-; CHECK-NEXT: mulli 6, 5, 40
-; CHECK-NEXT: sldi 0, 5, 4
-; CHECK-NEXT: extswsli 14, 5, 3
-; CHECK-NEXT: lxv 40, 0(7)
-; CHECK-NEXT: ld 7, 184(1) # 8-byte Folded Reload
-; CHECK-NEXT: add 31, 14, 22
-; CHECK-NEXT: add 11, 0, 22
-; CHECK-NEXT: mr 26, 22
-; CHECK-NEXT: addi 3, 11, 32
-; CHECK-NEXT: addi 11, 12, 1
-; CHECK-NEXT: mulli 12, 5, 48
-; CHECK-NEXT: addi 31, 31, 32
-; CHECK-NEXT: add 19, 22, 6
-; CHECK-NEXT: sldi 6, 5, 5
-; CHECK-NEXT: mulli 5, 5, 24
-; CHECK-NEXT: lxv 41, 0(7)
-; CHECK-NEXT: add 20, 22, 6
-; CHECK-NEXT: add 21, 22, 5
-; CHECK-NEXT: ld 5, 192(1) # 8-byte Folded Reload
-; CHECK-NEXT: lxv 43, 0(5)
-; CHECK-NEXT: ld 5, 200(1) # 8-byte Folded Reload
-; CHECK-NEXT: lxv 42, 0(5)
+; CHECK-NEXT: std 24, 144(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 25, 152(1) # 8-byte Folded Spill
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_3: # %_loop_2_do_.lr.ph
; CHECK-NEXT: # =>This Loop Header: Depth=1
; CHECK-NEXT: # Child Loop BB0_4 Depth 2
-; CHECK-NEXT: maddld 5, 12, 27, 0
-; CHECK-NEXT: mr 6, 18
-; CHECK-NEXT: mr 29, 21
+; CHECK-NEXT: maddld 5, 2, 27, 0
+; CHECK-NEXT: mr 6, 22
; CHECK-NEXT: mr 30, 20
-; CHECK-NEXT: mr 2, 19
-; CHECK-NEXT: mtctr 11
-; CHECK-NEXT: add 25, 22, 5
-; CHECK-NEXT: maddld 5, 12, 27, 14
-; CHECK-NEXT: add 24, 22, 5
+; CHECK-NEXT: mr 29, 19
+; CHECK-NEXT: mtctr 7
+; CHECK-NEXT: add 25, 21, 5
+; CHECK-NEXT: maddld 5, 2, 27, 14
+; CHECK-NEXT: add 24, 21, 5
+; CHECK-NEXT: maddld 5, 2, 27, 31
+; CHECK-NEXT: add 23, 21, 5
; CHECK-NEXT: mr 5, 26
; CHECK-NEXT: .p2align 5
; CHECK-NEXT: .LBB0_4: # %_loop_2_do_
@@ -212,66 +210,66 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lxvp 34, 0(6)
; CHECK-NEXT: lxvp 44, 0(5)
-; CHECK-NEXT: xvmaddadp 39, 45, 35
-; CHECK-NEXT: lxvp 46, 0(24)
-; CHECK-NEXT: xvmaddadp 38, 47, 35
-; CHECK-NEXT: lxvp 48, 0(25)
-; CHECK-NEXT: lxvp 50, 0(29)
-; CHECK-NEXT: lxvp 62, 0(30)
-; CHECK-NEXT: lxvp 60, 0(2)
+; CHECK-NEXT: xvmaddadp 1, 45, 35
+; CHECK-NEXT: lxvp 46, 0(30)
+; CHECK-NEXT: xvmaddadp 0, 47, 35
+; CHECK-NEXT: lxvp 48, 0(29)
+; CHECK-NEXT: lxvp 50, 0(23)
+; CHECK-NEXT: lxvp 62, 0(24)
+; CHECK-NEXT: lxvp 60, 0(25)
; CHECK-NEXT: lxvp 58, 32(6)
; CHECK-NEXT: lxvp 56, 32(5)
-; CHECK-NEXT: lxvp 54, 32(24)
-; CHECK-NEXT: lxvp 52, 32(25)
-; CHECK-NEXT: lxvp 30, 32(29)
-; CHECK-NEXT: lxvp 28, 32(30)
-; CHECK-NEXT: lxvp 26, 32(2)
-; CHECK-NEXT: xvmaddadp 33, 49, 35
-; CHECK-NEXT: xvmaddadp 32, 51, 35
-; CHECK-NEXT: xvmaddadp 37, 63, 35
-; CHECK-NEXT: xvmaddadp 36, 61, 35
-; CHECK-NEXT: xvmaddadp 13, 44, 34
-; CHECK-NEXT: xvmaddadp 12, 46, 34
-; CHECK-NEXT: xvmaddadp 11, 48, 34
-; CHECK-NEXT: xvmaddadp 10, 50, 34
-; CHECK-NEXT: xvmaddadp 9, 62, 34
-; CHECK-NEXT: xvmaddadp 8, 60, 34
-; CHECK-NEXT: xvmaddadp 7, 57, 59
-; CHECK-NEXT: xvmaddadp 6, 55, 59
-; CHECK-NEXT: xvmaddadp 5, 53, 59
-; CHECK-NEXT: xvmaddadp 4, 31, 59
-; CHECK-NEXT: xvmaddadp 3, 29, 59
-; CHECK-NEXT: xvmaddadp 2, 27, 59
-; CHECK-NEXT: xvmaddadp 1, 56, 58
-; CHECK-NEXT: xvmaddadp 0, 54, 58
-; CHECK-NEXT: xvmaddadp 40, 52, 58
+; CHECK-NEXT: lxvp 54, 32(30)
+; CHECK-NEXT: lxvp 52, 32(29)
+; CHECK-NEXT: lxvp 30, 32(23)
+; CHECK-NEXT: lxvp 28, 32(24)
+; CHECK-NEXT: lxvp 26, 32(25)
+; CHECK-NEXT: xvmaddadp 5, 49, 35
+; CHECK-NEXT: xvmaddadp 4, 51, 35
+; CHECK-NEXT: xvmaddadp 3, 63, 35
+; CHECK-NEXT: xvmaddadp 2, 61, 35
+; CHECK-NEXT: xvmaddadp 40, 44, 34
+; CHECK-NEXT: xvmaddadp 39, 46, 34
+; CHECK-NEXT: xvmaddadp 38, 48, 34
+; CHECK-NEXT: xvmaddadp 33, 50, 34
+; CHECK-NEXT: xvmaddadp 32, 62, 34
+; CHECK-NEXT: xvmaddadp 37, 60, 34
+; CHECK-NEXT: xvmaddadp 36, 57, 59
+; CHECK-NEXT: xvmaddadp 13, 55, 59
+; CHECK-NEXT: xvmaddadp 12, 53, 59
+; CHECK-NEXT: xvmaddadp 11, 31, 59
+; CHECK-NEXT: xvmaddadp 10, 29, 59
+; CHECK-NEXT: xvmaddadp 9, 27, 59
+; CHECK-NEXT: xvmaddadp 8, 56, 58
+; CHECK-NEXT: xvmaddadp 7, 54, 58
+; CHECK-NEXT: xvmaddadp 6, 52, 58
; CHECK-NEXT: xvmaddadp 41, 30, 58
; CHECK-NEXT: xvmaddadp 43, 28, 58
; CHECK-NEXT: xvmaddadp 42, 26, 58
; CHECK-NEXT: addi 6, 6, 64
; CHECK-NEXT: addi 5, 5, 64
+; CHECK-NEXT: addi 30, 30, 64
+; CHECK-NEXT: addi 29, 29, 64
+; CHECK-NEXT: addi 23, 23, 64
; CHECK-NEXT: addi 24, 24, 64
; CHECK-NEXT: addi 25, 25, 64
-; CHECK-NEXT: addi 29, 29, 64
-; CHECK-NEXT: addi 30, 30, 64
-; CHECK-NEXT: addi 2, 2, 64
; CHECK-NEXT: bdnz .LBB0_4
; CHECK-NEXT: # %bb.5: # %_loop_2_endl_
; CHECK-NEXT: #
; CHECK-NEXT: addi 28, 28, 6
-; CHECK-NEXT: add 26, 26, 12
-; CHECK-NEXT: add 31, 31, 12
-; CHECK-NEXT: add 19, 19, 12
-; CHECK-NEXT: add 3, 3, 12
-; CHECK-NEXT: add 20, 20, 12
-; CHECK-NEXT: add 21, 21, 12
+; CHECK-NEXT: add 26, 26, 2
+; CHECK-NEXT: add 20, 20, 2
+; CHECK-NEXT: add 11, 11, 2
+; CHECK-NEXT: add 19, 19, 2
+; CHECK-NEXT: add 3, 3, 2
+; CHECK-NEXT: add 12, 12, 2
; CHECK-NEXT: addi 27, 27, 1
; CHECK-NEXT: cmpld 28, 4
; CHECK-NEXT: ble 0, .LBB0_3
; CHECK-NEXT: # %bb.6: # %_loop_1_loopHeader_._return_bb_crit_edge.loopexit
-; CHECK-NEXT: ld 3, 56(1) # 8-byte Folded Reload
+; CHECK-NEXT: ld 3, 48(1) # 8-byte Folded Reload
; CHECK-NEXT: lxv 63, 384(1) # 16-byte Folded Reload
-; CHECK-NEXT: stxv 39, 0(3)
+; CHECK-NEXT: stxv 1, 0(3)
; CHECK-NEXT: ld 3, 64(1) # 8-byte Folded Reload
; CHECK-NEXT: lxv 62, 368(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 61, 352(1) # 16-byte Folded Reload
@@ -284,7 +282,7 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: lxv 54, 240(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 53, 224(1) # 16-byte Folded Reload
; CHECK-NEXT: lxv 52, 208(1) # 16-byte Folded Reload
-; CHECK-NEXT: stxv 38, 0(3)
+; CHECK-NEXT: stxv 0, 0(3)
; CHECK-NEXT: ld 3, 72(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 31, 584(1) # 8-byte Folded Reload
; CHECK-NEXT: lfd 30, 576(1) # 8-byte Folded Reload
@@ -297,8 +295,8 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: ld 29, 520(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 28, 512(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 27, 504(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 33, 0(3)
-; CHECK-NEXT: ld 3, 40(1) # 8-byte Folded Reload
+; CHECK-NEXT: stxv 5, 0(3)
+; CHECK-NEXT: ld 3, 32(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 26, 496(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 25, 488(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 24, 480(1) # 8-byte Folded Reload
@@ -310,40 +308,41 @@ define void @foo(ptr %.m, ptr %.n, ptr %.a, ptr %.x, ptr %.l, ptr %.vy01, ptr %.
; CHECK-NEXT: ld 18, 432(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 17, 424(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 16, 416(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 32, 0(3)
-; CHECK-NEXT: ld 3, 48(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 37, 0(10)
-; CHECK-NEXT: stxv 36, 0(9)
-; CHECK-NEXT: stxv 13, 0(8)
+; CHECK-NEXT: stxv 4, 0(3)
+; CHECK-NEXT: ld 3, 40(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 15, 408(1) # 8-byte Folded Reload
; CHECK-NEXT: ld 14, 400(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 12, 0(3)
+; CHECK-NEXT: stxv 3, 0(3)
+; CHECK-NEXT: ld 3, 56(1) # 8-byte Folded Reload
+; CHECK-NEXT: stxv 2, 0(8)
+; CHECK-NEXT: stxv 40, 0(3)
; CHECK-NEXT: ld 3, 80(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 11, 0(3)
+; CHECK-NEXT: stxv 39, 0(10)
+; CHECK-NEXT: stxv 38, 0(3)
; CHECK-NEXT: ld 3, 88(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 10, 0(3)
+; CHECK-NEXT: stxv 33, 0(3)
; CHECK-NEXT: ld 3, 96(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 9, 0(3)
+; CHECK-NEXT: stxv 32, 0(3)
; CHECK-NEXT: ld 3, 104(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 8, 0(3)
+; CHECK-NEXT: stxv 37, 0(3)
; CHECK-NEXT: ld 3, 112(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 7, 0(3)
+; CHECK-NEXT: stxv 36, 0(3)
; CHECK-NEXT: ld 3, 120(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 6, 0(3)
+; CHECK-NEXT: stxv 13, 0(3)
; CHECK-NEXT: ld 3, 128(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 5, 0(3)
+; CHECK-NEXT: stxv 12, 0(3)
; CHECK-NEXT: ld 3, 136(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 4, 0(3)
+; CHECK-NEXT: stxv 11, 0(3)
; CHECK-NEXT: ld 3, 144(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 3, 0(3)
+; CHECK-NEXT: stxv 10, 0(3)
; CHECK-NEXT: ld 3, 152(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 2, 0(3)
+; CHECK-NEXT: stxv 9, 0(3)
; CHECK-NEXT: ld 3, 160(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 1, 0(3)
+; CHECK-NEXT: stxv 8, 0(3)
; CHECK-NEXT: ld 3, 168(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 0, 0(3)
+; CHECK-NEXT: stxv 7, 0(3)
; CHECK-NEXT: ld 3, 176(1) # 8-byte Folded Reload
-; CHECK-NEXT: stxv 40, 0(3)
+; CHECK-NEXT: stxv 6, 0(3)
; CHECK-NEXT: ld 3, 184(1) # 8-byte Folded Reload
; CHECK-NEXT: stxv 41, 0(3)
; CHECK-NEXT: ld 3, 192(1) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll b/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
index 799ba63..8fb4c21 100644
--- a/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
+++ b/llvm/test/CodeGen/PowerPC/no-ctr-loop-if-exit-in-nested-loop.ll
@@ -40,9 +40,10 @@ define signext i32 @test(ptr noalias %PtrA, ptr noalias %PtrB, i32 signext %LenA
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB0_4: # %if.end9
; CHECK-NEXT: #
-; CHECK-NEXT: lwzx 10, 6, 9
+; CHECK-NEXT: add 9, 3, 9
+; CHECK-NEXT: lwz 10, 4(9)
; CHECK-NEXT: addi 10, 10, 1
-; CHECK-NEXT: stwx 10, 6, 9
+; CHECK-NEXT: stw 10, 4(9)
; CHECK-NEXT: b .LBB0_1
; CHECK-NEXT: .LBB0_5: # %if.then
; CHECK-NEXT: lwax 3, 9, 3
diff --git a/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..34493ce
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,33 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that the `calleeTypeIds` field is not present in `callSites`.
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..6e1fe92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the
+;; type id computed from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..1f91f41
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/callsite-emit-calleetypeid.ll
@@ -0,0 +1,21 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the
+;; type id computed from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple riscv64 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+; RUN: llc --call-graph-section -mtriple riscv32 < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index 1263892..4091524 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -684,13 +684,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -698,25 +698,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -724,8 +724,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -733,7 +733,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -741,12 +741,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -754,8 +754,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -767,7 +767,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -779,13 +779,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -793,8 +793,8 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 12(a0)
; RV32-NEXT: sb zero, 13(a0)
@@ -814,7 +814,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 12(a0)
; RV64-NEXT: sb zero, 13(a0)
@@ -834,7 +834,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
@@ -842,7 +842,7 @@ define void @bzero_16(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -851,8 +851,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 28(a0)
; RV32-NEXT: sb zero, 29(a0)
@@ -888,7 +888,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 28(a0)
; RV64-NEXT: sb zero, 29(a0)
@@ -924,7 +924,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 16(a0)
; RV32-FAST-NEXT: sw zero, 20(a0)
@@ -936,7 +936,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: sd zero, 8(a0)
@@ -947,8 +947,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 60(a0)
; RV32-NEXT: sb zero, 61(a0)
@@ -1016,7 +1016,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 60(a0)
; RV64-NEXT: sb zero, 61(a0)
@@ -1084,7 +1084,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 48(a0)
; RV32-FAST-NEXT: sw zero, 52(a0)
@@ -1104,7 +1104,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 12(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 32(a0)
; RV64-FAST-NEXT: sd zero, 40(a0)
@@ -1121,13 +1121,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1135,13 +1135,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1149,14 +1149,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -1165,8 +1165,8 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
@@ -1174,7 +1174,7 @@ define void @aligned_bzero_16(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1183,8 +1183,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 16(a0)
; RV32-BOTH-NEXT: sw zero, 20(a0)
@@ -1196,7 +1196,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: sd zero, 8(a0)
@@ -1207,8 +1207,8 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 48(a0)
; RV32-BOTH-NEXT: sw zero, 52(a0)
@@ -1228,7 +1228,7 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; RV32-BOTH-NEXT: sw zero, 12(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 32(a0)
; RV64-BOTH-NEXT: sd zero, 40(a0)
@@ -1247,28 +1247,28 @@ define void @aligned_bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
; Usual overlap tricks
-define void @aligned_bzero_7(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_7:
+define void @aligned_memset_zero_7(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_7:
; RV32: # %bb.0:
; RV32-NEXT: sw zero, 0(a0)
; RV32-NEXT: sh zero, 4(a0)
; RV32-NEXT: sb zero, 6(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_7:
+; RV64-LABEL: aligned_memset_zero_7:
; RV64: # %bb.0:
; RV64-NEXT: sw zero, 0(a0)
; RV64-NEXT: sh zero, 4(a0)
; RV64-NEXT: sb zero, 6(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_7:
+; RV32-FAST-LABEL: aligned_memset_zero_7:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 3(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_7:
+; RV64-FAST-LABEL: aligned_memset_zero_7:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 3(a0)
; RV64-FAST-NEXT: sw zero, 0(a0)
@@ -1277,8 +1277,8 @@ define void @aligned_bzero_7(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_15(ptr %a) nounwind {
-; RV32-LABEL: aligned_bzero_15:
+define void @aligned_memset_zero_15(ptr %a) nounwind {
+; RV32-LABEL: aligned_memset_zero_15:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 14(a0)
; RV32-NEXT: sw zero, 0(a0)
@@ -1287,7 +1287,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-NEXT: sh zero, 12(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: aligned_bzero_15:
+; RV64-LABEL: aligned_memset_zero_15:
; RV64: # %bb.0:
; RV64-NEXT: sd zero, 0(a0)
; RV64-NEXT: sw zero, 8(a0)
@@ -1295,7 +1295,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV64-NEXT: sb zero, 14(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: aligned_bzero_15:
+; RV32-FAST-LABEL: aligned_memset_zero_15:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 11(a0)
; RV32-FAST-NEXT: sw zero, 0(a0)
@@ -1303,7 +1303,7 @@ define void @aligned_bzero_15(ptr %a) nounwind {
; RV32-FAST-NEXT: sw zero, 8(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: aligned_bzero_15:
+; RV64-FAST-LABEL: aligned_memset_zero_15:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 7(a0)
; RV64-FAST-NEXT: sd zero, 0(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 5747bbb..bd37443 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -554,9 +554,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLA-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; VLA-NEXT: vmv.v.i v10, 0
; VLA-NEXT: vmv1r.v v0, v8
-; VLA-NEXT: vmerge.vim v8, v10, 1, v0
; VLA-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLA-NEXT: vmv.v.v v9, v8
+; VLA-NEXT: vmerge.vim v9, v10, 1, v0
; VLA-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; VLA-NEXT: vmsne.vi v0, v9, 0
; VLA-NEXT: ret
@@ -568,9 +567,8 @@ define <vscale x 2 x i1> @insert_nxv2i1_v4i1_0(<vscale x 2 x i1> %v, ptr %svp) {
; VLS-NEXT: vmv.v.i v9, 0
; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vmv1r.v v0, v8
-; VLS-NEXT: vmerge.vim v8, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, tu, ma
-; VLS-NEXT: vmv.v.v v10, v8
+; VLS-NEXT: vmerge.vim v10, v9, 1, v0
; VLS-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; VLS-NEXT: vmsne.vi v0, v10, 0
; VLS-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
index 8963940..2c11bd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -360,13 +360,13 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @bzero_1(ptr %a) nounwind {
-; RV32-BOTH-LABEL: bzero_1:
+define void @memset_zero_1(ptr %a) nounwind {
+; RV32-BOTH-LABEL: memset_zero_1:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sb zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: bzero_1:
+; RV64-BOTH-LABEL: memset_zero_1:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sb zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -374,25 +374,25 @@ define void @bzero_1(ptr %a) nounwind {
ret void
}
-define void @bzero_2(ptr %a) nounwind {
-; RV32-LABEL: bzero_2:
+define void @memset_zero_2(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_2:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_2:
+; RV64-LABEL: memset_zero_2:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_2:
+; RV32-FAST-LABEL: memset_zero_2:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sh zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_2:
+; RV64-FAST-LABEL: memset_zero_2:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sh zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -400,8 +400,8 @@ define void @bzero_2(ptr %a) nounwind {
ret void
}
-define void @bzero_4(ptr %a) nounwind {
-; RV32-LABEL: bzero_4:
+define void @memset_zero_4(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_4:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 0(a0)
; RV32-NEXT: sb zero, 1(a0)
@@ -409,7 +409,7 @@ define void @bzero_4(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_4:
+; RV64-LABEL: memset_zero_4:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 0(a0)
; RV64-NEXT: sb zero, 1(a0)
@@ -417,12 +417,12 @@ define void @bzero_4(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_4:
+; RV32-FAST-LABEL: memset_zero_4:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_4:
+; RV64-FAST-LABEL: memset_zero_4:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sw zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -430,8 +430,8 @@ define void @bzero_4(ptr %a) nounwind {
ret void
}
-define void @bzero_8(ptr %a) nounwind {
-; RV32-LABEL: bzero_8:
+define void @memset_zero_8(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_8:
; RV32: # %bb.0:
; RV32-NEXT: sb zero, 4(a0)
; RV32-NEXT: sb zero, 5(a0)
@@ -443,7 +443,7 @@ define void @bzero_8(ptr %a) nounwind {
; RV32-NEXT: sb zero, 3(a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_8:
+; RV64-LABEL: memset_zero_8:
; RV64: # %bb.0:
; RV64-NEXT: sb zero, 4(a0)
; RV64-NEXT: sb zero, 5(a0)
@@ -455,13 +455,13 @@ define void @bzero_8(ptr %a) nounwind {
; RV64-NEXT: sb zero, 3(a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_8:
+; RV32-FAST-LABEL: memset_zero_8:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: sw zero, 0(a0)
; RV32-FAST-NEXT: sw zero, 4(a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_8:
+; RV64-FAST-LABEL: memset_zero_8:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: sd zero, 0(a0)
; RV64-FAST-NEXT: ret
@@ -469,29 +469,29 @@ define void @bzero_8(ptr %a) nounwind {
ret void
}
-define void @bzero_16(ptr %a) nounwind {
-; RV32-LABEL: bzero_16:
+define void @memset_zero_16(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_16:
+; RV64-LABEL: memset_zero_16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_16:
+; RV32-FAST-LABEL: memset_zero_16:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_16:
+; RV64-FAST-LABEL: memset_zero_16:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -501,8 +501,8 @@ define void @bzero_16(ptr %a) nounwind {
ret void
}
-define void @bzero_32(ptr %a) nounwind {
-; RV32-LABEL: bzero_32:
+define void @memset_zero_32(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
@@ -511,7 +511,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_32:
+; RV64-LABEL: memset_zero_32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
@@ -520,7 +520,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_32:
+; RV32-FAST-LABEL: memset_zero_32:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
@@ -529,7 +529,7 @@ define void @bzero_32(ptr %a) nounwind {
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_32:
+; RV64-FAST-LABEL: memset_zero_32:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -541,8 +541,8 @@ define void @bzero_32(ptr %a) nounwind {
ret void
}
-define void @bzero_64(ptr %a) nounwind {
-; RV32-LABEL: bzero_64:
+define void @memset_zero_64(ptr %a) nounwind {
+; RV32-LABEL: memset_zero_64:
; RV32: # %bb.0:
; RV32-NEXT: li a1, 64
; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -550,7 +550,7 @@ define void @bzero_64(ptr %a) nounwind {
; RV32-NEXT: vse8.v v8, (a0)
; RV32-NEXT: ret
;
-; RV64-LABEL: bzero_64:
+; RV64-LABEL: memset_zero_64:
; RV64: # %bb.0:
; RV64-NEXT: li a1, 64
; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -558,14 +558,14 @@ define void @bzero_64(ptr %a) nounwind {
; RV64-NEXT: vse8.v v8, (a0)
; RV64-NEXT: ret
;
-; RV32-FAST-LABEL: bzero_64:
+; RV32-FAST-LABEL: memset_zero_64:
; RV32-FAST: # %bb.0:
; RV32-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-FAST-NEXT: vmv.v.i v8, 0
; RV32-FAST-NEXT: vse64.v v8, (a0)
; RV32-FAST-NEXT: ret
;
-; RV64-FAST-LABEL: bzero_64:
+; RV64-FAST-LABEL: memset_zero_64:
; RV64-FAST: # %bb.0:
; RV64-FAST-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-FAST-NEXT: vmv.v.i v8, 0
@@ -577,13 +577,13 @@ define void @bzero_64(ptr %a) nounwind {
; /////////////////////////////////////////////////////////////////////////////
-define void @aligned_bzero_2(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_2:
+define void @aligned_memset_zero_2(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_2:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_2:
+; RV64-BOTH-LABEL: aligned_memset_zero_2:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -591,13 +591,13 @@ define void @aligned_bzero_2(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_4(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_4:
+define void @aligned_memset_zero_4(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_4:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_4:
+; RV64-BOTH-LABEL: aligned_memset_zero_4:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sw zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -605,14 +605,14 @@ define void @aligned_bzero_4(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_8(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_8:
+define void @aligned_memset_zero_8(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_8:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sw zero, 0(a0)
; RV32-BOTH-NEXT: sw zero, 4(a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_8:
+; RV64-BOTH-LABEL: aligned_memset_zero_8:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sd zero, 0(a0)
; RV64-BOTH-NEXT: ret
@@ -621,15 +621,15 @@ define void @aligned_bzero_8(ptr %a) nounwind {
}
-define void @aligned_bzero_16(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_16:
+define void @aligned_memset_zero_16(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_16:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_16:
+; RV64-BOTH-LABEL: aligned_memset_zero_16:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -639,8 +639,8 @@ define void @aligned_bzero_16(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_32(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_32:
+define void @aligned_memset_zero_32(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_32:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -649,7 +649,7 @@ define void @aligned_bzero_32(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_32:
+; RV64-BOTH-LABEL: aligned_memset_zero_32:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -661,15 +661,15 @@ define void @aligned_bzero_32(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_64(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_64:
+define void @aligned_memset_zero_64(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_64:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_64:
+; RV64-BOTH-LABEL: aligned_memset_zero_64:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -679,8 +679,8 @@ define void @aligned_bzero_64(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_66(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_66:
+define void @aligned_memset_zero_66(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_66:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: sh zero, 64(a0)
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -688,7 +688,7 @@ define void @aligned_bzero_66(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_66:
+; RV64-BOTH-LABEL: aligned_memset_zero_66:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: sh zero, 64(a0)
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -699,8 +699,8 @@ define void @aligned_bzero_66(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_96(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_96:
+define void @aligned_memset_zero_96(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_96:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -713,7 +713,7 @@ define void @aligned_bzero_96(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_96:
+; RV64-BOTH-LABEL: aligned_memset_zero_96:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -729,15 +729,15 @@ define void @aligned_bzero_96(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_128(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_128:
+define void @aligned_memset_zero_128(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_128:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_128:
+; RV64-BOTH-LABEL: aligned_memset_zero_128:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
@@ -747,8 +747,8 @@ define void @aligned_bzero_128(ptr %a) nounwind {
ret void
}
-define void @aligned_bzero_256(ptr %a) nounwind {
-; RV32-BOTH-LABEL: aligned_bzero_256:
+define void @aligned_memset_zero_256(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_memset_zero_256:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-BOTH-NEXT: vmv.v.i v8, 0
@@ -757,7 +757,7 @@ define void @aligned_bzero_256(ptr %a) nounwind {
; RV32-BOTH-NEXT: vse64.v v8, (a0)
; RV32-BOTH-NEXT: ret
;
-; RV64-BOTH-LABEL: aligned_bzero_256:
+; RV64-BOTH-LABEL: aligned_memset_zero_256:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-BOTH-NEXT: vmv.v.i v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index 3dc83d5..38d38f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -1636,3 +1636,49 @@ define <8 x half> @vector_interleave8_v8f16_v1f16(<1 x half> %a, <1 x half> %b,
%res = call <8 x half> @llvm.vector.interleave8.v8f16(<1 x half> %a, <1 x half> %b, <1 x half> %c, <1 x half> %d, <1 x half> %e, <1 x half> %f, <1 x half> %g, <1 x half> %h)
ret <8 x half> %res
}
+
+define <8 x i16> @interleave4_const_splat_v8i16(<2 x i16> %a) {
+; CHECK-LABEL: interleave4_const_splat_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_const_splat_v8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave4_const_splat_v8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZIP-NEXT: vmv.v.i v8, 3
+; ZIP-NEXT: ret
+ %retval = call <8 x i16> @llvm.vector.interleave4.v8i16(<2 x i16> splat(i16 3), <2 x i16> splat(i16 3), <2 x i16> splat(i16 3), <2 x i16> splat(i16 3))
+ ret <8 x i16> %retval
+}
+
+define <8 x i16> @interleave4_same_nonconst_splat_v8i16(i16 %a) {
+; CHECK-LABEL: interleave4_same_nonconst_splat_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_same_nonconst_splat_v8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave4_same_nonconst_splat_v8i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; ZIP-NEXT: vmv.v.x v8, a0
+; ZIP-NEXT: ret
+ %ins = insertelement <2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <2 x i16> %ins, <2 x i16> poison, <2 x i32> zeroinitializer
+ %retval = call <8 x i16> @llvm.vector.interleave4.v8i16(<2 x i16> %splat, <2 x i16> %splat, <2 x i16> %splat, <2 x i16> %splat)
+ ret <8 x i16> %retval
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 01cc5c5..ee38257 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -14947,3 +14947,147 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv2f64(<vscale x 2 x
%res = call <vscale x 16 x double> @llvm.vector.interleave8.nxv16f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x double> %v4, <vscale x 2 x double> %v5, <vscale x 2 x double> %v6, <vscale x 2 x double> %v7)
ret <vscale x 16 x double> %res
}
+
+define <vscale x 4 x i16> @interleave2_same_const_splat_nxv4i16() {
+; CHECK-LABEL: interleave2_same_const_splat_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_same_const_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_const_splat_nxv4i16() {
+; V-LABEL: interleave2_diff_const_splat_nxv4i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; V-NEXT: vmv.v.i v9, 3
+; V-NEXT: li a0, 4
+; V-NEXT: vmv.v.i v10, -1
+; V-NEXT: vwaddu.vx v8, v9, a0
+; V-NEXT: vwmaccu.vx v8, a0, v10
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v9, v8, a0
+; V-NEXT: vslideup.vx v8, v9, a0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_diff_const_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 4
+; ZVBB-NEXT: li a0, 3
+; ZVBB-NEXT: vwsll.vi v9, v8, 16
+; ZVBB-NEXT: vwaddu.wx v8, v9, a0
+; ZVBB-NEXT: csrr a0, vlenb
+; ZVBB-NEXT: srli a0, a0, 2
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v9, v8, a0
+; ZVBB-NEXT: vslideup.vx v8, v9, a0
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave2_diff_const_splat_nxv4i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZIP-NEXT: vmv.v.i v9, 4
+; ZIP-NEXT: vmv.v.i v10, 3
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: ri.vzip2b.vv v11, v10, v9
+; ZIP-NEXT: ri.vzip2a.vv v8, v10, v9
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v8, v11, a0
+; ZIP-NEXT: ret
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.v4i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 4))
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_same_nonconst_splat_nxv4i16(i16 %a) {
+; CHECK-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_same_nonconst_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a0
+; ZVBB-NEXT: ret
+ %ins = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat = shufflevector <vscale x 2 x i16> %ins, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat, <vscale x 2 x i16> %splat)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 4 x i16> @interleave2_diff_nonconst_splat_nxv4i16(i16 %a, i16 %b) {
+; V-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; V: # %bb.0:
+; V-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; V-NEXT: vmv.v.x v9, a0
+; V-NEXT: vmv.v.i v10, -1
+; V-NEXT: csrr a0, vlenb
+; V-NEXT: vwaddu.vx v8, v9, a1
+; V-NEXT: vwmaccu.vx v8, a1, v10
+; V-NEXT: srli a0, a0, 2
+; V-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; V-NEXT: vslidedown.vx v9, v8, a0
+; V-NEXT: vslideup.vx v8, v9, a0
+; V-NEXT: ret
+;
+; ZVBB-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVBB-NEXT: vmv.v.x v8, a1
+; ZVBB-NEXT: csrr a1, vlenb
+; ZVBB-NEXT: vwsll.vi v9, v8, 16
+; ZVBB-NEXT: vwaddu.wx v8, v9, a0
+; ZVBB-NEXT: srli a1, a1, 2
+; ZVBB-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVBB-NEXT: vslidedown.vx v9, v8, a1
+; ZVBB-NEXT: vslideup.vx v8, v9, a1
+; ZVBB-NEXT: ret
+;
+; ZIP-LABEL: interleave2_diff_nonconst_splat_nxv4i16:
+; ZIP: # %bb.0:
+; ZIP-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZIP-NEXT: vmv.v.x v9, a0
+; ZIP-NEXT: vmv.v.x v10, a1
+; ZIP-NEXT: csrr a0, vlenb
+; ZIP-NEXT: ri.vzip2b.vv v11, v9, v10
+; ZIP-NEXT: ri.vzip2a.vv v8, v9, v10
+; ZIP-NEXT: srli a0, a0, 2
+; ZIP-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZIP-NEXT: vslideup.vx v8, v11, a0
+; ZIP-NEXT: ret
+ %ins1 = insertelement <vscale x 2 x i16> poison, i16 %a, i32 0
+ %splat1 = shufflevector <vscale x 2 x i16> %ins1, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %ins2 = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+ %splat2 = shufflevector <vscale x 2 x i16> %ins2, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+ %retval = call <vscale x 4 x i16> @llvm.vector.interleave2.nxv4i16(<vscale x 2 x i16> %splat1, <vscale x 2 x i16> %splat2)
+ ret <vscale x 4 x i16> %retval
+}
+
+define <vscale x 8 x i16> @interleave4_same_const_splat_nxv8i16() {
+; CHECK-LABEL: interleave4_same_const_splat_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 3
+; CHECK-NEXT: ret
+;
+; ZVBB-LABEL: interleave4_same_const_splat_nxv8i16:
+; ZVBB: # %bb.0:
+; ZVBB-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVBB-NEXT: vmv.v.i v8, 3
+; ZVBB-NEXT: ret
+ %retval = call <vscale x 8 x i16> @llvm.vector.interleave4.nxv8i16(<vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3), <vscale x 2 x i16> splat(i16 3))
+ ret <vscale x 8 x i16> %retval
+}
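A quick model of why the same-splat interleaves above fold to a single vmv.v.i/vmv.v.x: interleaving N copies of one splat is itself a splat of N times the length, while interleaving two different splats is not. A minimal Python sketch, purely illustrative and not tied to any LLVM API:

    def interleave(*vecs):
        # Element i of each input vector, in round-robin order.
        return [v[i] for i in range(len(vecs[0])) for v in vecs]

    splat = [3, 3]
    assert interleave(splat, splat, splat, splat) == [3] * 8  # still a splat
    assert interleave([3, 3], [4, 4]) == [3, 4, 3, 4]         # needs a real zip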
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
index 1e2e779..2f2035b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll
@@ -222,3 +222,14 @@ define <vscale x 1 x i64> @vleff_move_past_passthru(ptr %p, ptr %q, iXLen %avl)
%b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %vec, iXLen %avl)
ret <vscale x 1 x i64> %b
}
+
+define <vscale x 1 x i64> @vmerge(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl) {
+; CHECK-LABEL: vmerge:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT: vmerge.vvm v8, v9, v10, v0
+; CHECK-NEXT: ret
+ %a = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %x, <vscale x 1 x i64> %y, <vscale x 1 x i1> %m, iXLen %avl)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %a, iXLen %avl)
+ ret <vscale x 1 x i64> %b
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
index 6e106e5..9c3e96d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.mir
@@ -152,3 +152,19 @@ body: |
%y:gpr = ADDI $x0, 1
%z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
...
+---
+name: vmerge_vvm
+body: |
+ bb.0:
+ liveins: $v8, $v0
+ ; CHECK-LABEL: name: vmerge_vvm
+ ; CHECK: liveins: $v8, $v0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %passthru:vrnov0 = COPY $v8
+ ; CHECK-NEXT: %mask:vmv0 = COPY $v0
+ ; CHECK-NEXT: %x:vrnov0 = PseudoVMERGE_VVM_M1 %passthru, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %passthru:vr = COPY $v8
+ %mask:vmv0 = COPY $v0
+ %x:vrnov0 = PseudoVMERGE_VVM_M1 $noreg, %passthru, $noreg, %mask, 4, 5 /* e32 */
+ %z:vr = PseudoVMV_V_V_M1 %passthru, %x, 4, 5 /* e32 */, 0 /* tu, mu */
+...
diff --git a/llvm/test/CodeGen/RISCV/zilsd.ll b/llvm/test/CodeGen/RISCV/zilsd.ll
index 09b065a..048ce96 100644
--- a/llvm/test/CodeGen/RISCV/zilsd.ll
+++ b/llvm/test/CodeGen/RISCV/zilsd.ll
@@ -117,3 +117,22 @@ entyr:
store i64 0, ptr @g
ret void
}
+
+define void @large_offset(ptr nocapture %p, i64 %d) nounwind {
+; CHECK-LABEL: large_offset:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 4
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: ld a2, -384(a0)
+; CHECK-NEXT: addi a2, a2, 1
+; CHECK-NEXT: seqz a1, a2
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: sd a2, -384(a0)
+; CHECK-NEXT: ret
+entry:
+ %add.ptr = getelementptr inbounds i64, ptr %p, i64 2000
+ %a = load i64, ptr %add.ptr, align 8
+ %b = add i64 %a, 1
+ store i64 %b, ptr %add.ptr, align 8
+ ret void
+}
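The large_offset codegen above works because the byte offset 2000 * 8 = 16000 does not fit a signed 12-bit load/store immediate, so it is split into a lui-materialized upper part plus a signed 12-bit remainder. A small sketch of the usual RISC-V hi20/lo12 split; the 0x800 rounding bias is the standard trick for keeping the low part in range, stated here as an assumption rather than taken from the backend source:

    def split_offset(off):
        # Round hi so that lo lands in the signed 12-bit range [-2048, 2047].
        hi = (off + 0x800) >> 12
        lo = off - (hi << 12)
        assert -2048 <= lo <= 2047
        return hi, lo

    print(split_offset(2000 * 8))  # (4, -384): lui a1, 4 ; ld a2, -384(a0)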
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
new file mode 100644
index 0000000..b2333e6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/issue-146942-ptr-cast.ll
@@ -0,0 +1,42 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv-unknown-vulkan %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan %s -o - -filetype=obj | spirv-val %}
+
+@.str = private unnamed_addr constant [4 x i8] c"In3\00", align 1
+@.str.2 = private unnamed_addr constant [5 x i8] c"Out4\00", align 1
+@.str.3 = private unnamed_addr constant [5 x i8] c"Out3\00", align 1
+
+
+; CHECK-DAG: %[[#INT32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#INT4:]] = OpTypeVector %[[#INT32]] 4
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#FLOAT4:]] = OpTypeVector %[[#FLOAT]] 4
+; CHECK-DAG: %[[#INT3:]] = OpTypeVector %[[#INT32]] 3
+; CHECK-DAG: %[[#UNDEF_INT4:]] = OpUndef %[[#INT4]]
+
+define void @case1() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4i32_12_1t(target("spirv.VulkanBuffer", [0 x <4 x i32>], 12, 1) %2, i32 0)
+ store <4 x i32> %4, ptr addrspace(11) %5, align 16
+ ret void
+}
+
+define void @case2() local_unnamed_addr {
+ ; CHECK: %[[#BUFFER_LOAD:]] = OpLoad %[[#FLOAT4]] %{{[0-9]+}} Aligned 16
+ ; CHECK: %[[#CAST_LOAD:]] = OpBitcast %[[#INT4]] %[[#BUFFER_LOAD]]
+ ; CHECK: %[[#VEC_SHUFFLE:]] = OpVectorShuffle %[[#INT4]] %[[#CAST_LOAD]] %[[#CAST_LOAD]] 0 1 2 3
+ ; CHECK: %[[#VEC_TRUNCATE:]] = OpVectorShuffle %[[#INT3]] %[[#VEC_SHUFFLE]] %[[#UNDEF_INT4]] 0 1 2
+ %1 = tail call target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v4f32_12_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %2 = tail call target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0v3i32_12_1t(i32 0, i32 5, i32 1, i32 0, i1 false, ptr nonnull @.str.3)
+ %3 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v4f32_12_0t(target("spirv.VulkanBuffer", [0 x <4 x float>], 12, 0) %1, i32 0)
+ %4 = load <4 x i32>, ptr addrspace(11) %3, align 16
+ %5 = shufflevector <4 x i32> %4, <4 x i32> poison, <3 x i32> <i32 0, i32 1, i32 2>
+ %6 = tail call noundef align 16 dereferenceable(16) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0v3i32_12_1t(target("spirv.VulkanBuffer", [0 x <3 x i32>], 12, 1) %2, i32 0)
+ store <3 x i32> %5, ptr addrspace(11) %6, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
index 085f8b3..9d07b63 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/lifetime.ll
@@ -33,7 +33,7 @@ define spir_func void @foo(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
%RoundedRangeKernel = alloca %tprange, align 8
call void @llvm.lifetime.start.p0(i64 72, ptr nonnull %RoundedRangeKernel)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
- %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
call void @llvm.lifetime.end.p0(i64 72, ptr nonnull %RoundedRangeKernel)
ret void
}
@@ -55,7 +55,7 @@ define spir_func void @bar(ptr noundef byval(%tprange) align 8 %_arg_UserRange)
%RoundedRangeKernel = alloca %tprange, align 8
call void @llvm.lifetime.start.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
call void @llvm.memcpy.p0.p0.i64(ptr align 8 %RoundedRangeKernel, ptr align 8 %_arg_UserRange, i64 16, i1 false)
- %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 16
+ %KernelFunc = getelementptr inbounds i8, ptr %RoundedRangeKernel, i64 8
call void @llvm.lifetime.end.p0(i64 -1, ptr nonnull %RoundedRangeKernel)
ret void
}
diff --git a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
index a1ff1e0..66337b1 100644
--- a/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
+++ b/llvm/test/CodeGen/SPIRV/logical-struct-access.ll
@@ -1,4 +1,5 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
+; RUN: llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -print-after-all | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll
new file mode 100644
index 0000000..26dc60e
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-1.ll
@@ -0,0 +1,46 @@
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+%struct.S1 = type { <4 x i32>, [10 x <4 x float>], <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+
+define <4 x float> @main() {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %3 = tail call noundef align 1 dereferenceable(192) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 0)
+
+; CHECK-DAG: %[[#ulong:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#ulong_1:]] = OpConstant %[[#ulong]] 1
+; CHECK-DAG: %[[#ulong_3:]] = OpConstant %[[#ulong]] 3
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
+; CHECK-DAG: %[[#uint_10:]] = OpConstant %[[#uint]] 10
+
+; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#v4f:]] = OpTypeVector %[[#float]] 4
+; CHECK-DAG: %[[#arr_v4f:]] = OpTypeArray %[[#v4f]] %[[#uint_10]]
+; CHECK-DAG: %[[#S1:]] = OpTypeStruct %[[#]] %[[#arr_v4f]] %[[#]]
+; CHECK-DAG: %[[#sb_S1:]] = OpTypePointer StorageBuffer %[[#S1]]
+; CHECK-DAG: %[[#sb_v4f:]] = OpTypePointer StorageBuffer %[[#v4f]]
+
+; CHECK: %[[#tmp:]] = OpAccessChain %[[#sb_S1]] %[[#]] %[[#uint_0]] %[[#uint_0]]
+; CHECK: %[[#ptr:]] = OpInBoundsAccessChain %[[#sb_v4f]] %[[#tmp]] %[[#ulong_1]] %[[#ulong_3]]
+; This rewritten GEP folded all constant indices into a single byte offset.
+; Make sure the correct structure and array indices are recovered from it.
+ %arrayidx.i = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 64
+
+; CHECK: OpLoad %[[#v4f]] %[[#ptr]]
+ %4 = load <4 x float>, ptr addrspace(11) %arrayidx.i, align 1
+
+ ret <4 x float> %4
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
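For reference, the index recovery that the CHECK lines above demand can be modeled directly: the folded byte offset 64 into %struct.S1 has to be split back into a struct-field index and an array-element index. A minimal sketch under the layout used here (<4 x i32> and <4 x float> are 16 bytes, so the fields span 16, 160, and 16 bytes); the helper names are hypothetical:

    # %struct.S1 = { <4 x i32>, [10 x <4 x float>], <4 x float> }, sizes in bytes.
    FIELD_SIZES = [16, 160, 16]

    def offset_to_indices(off, elt_size=16):
        # Walk the fields, then divide the remainder by the element size.
        # (Only meaningful when the offset lands in the array field, as here.)
        for field, size in enumerate(FIELD_SIZES):
            if off < size:
                return field, off // elt_size
            off -= size
        raise ValueError("offset past end of struct")

    print(offset_to_indices(64))  # (1, 3): the %ulong_1 %ulong_3 indices checked above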
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll
new file mode 100644
index 0000000..a6efb38
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access-constant-index-2.ll
@@ -0,0 +1,54 @@
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+%struct.S1 = type { <4 x i32>, [10 x <4 x float>], <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+
+define <4 x float> @main(i32 %index) {
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %3 = tail call noundef align 1 dereferenceable(192) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 0)
+
+; CHECK-DAG: %[[#ulong:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#ulong_1:]] = OpConstant %[[#ulong]] 1
+
+; CHECK-DAG: %[[#uint:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#uint_0:]] = OpConstant %[[#uint]] 0
+; CHECK-DAG: %[[#uint_10:]] = OpConstant %[[#uint]] 10
+; CHECK-DAG: %[[#uint_16:]] = OpConstant %[[#uint]] 16
+
+; CHECK-DAG: %[[#float:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#v4f:]] = OpTypeVector %[[#float]] 4
+; CHECK-DAG: %[[#arr_v4f:]] = OpTypeArray %[[#v4f]] %[[#uint_10]]
+; CHECK-DAG: %[[#S1:]] = OpTypeStruct %[[#]] %[[#arr_v4f]] %[[#]]
+; CHECK-DAG: %[[#sb_S1:]] = OpTypePointer StorageBuffer %[[#S1]]
+; CHECK-DAG: %[[#sb_arr_v4f:]] = OpTypePointer StorageBuffer %[[#arr_v4f]]
+; CHECK-DAG: %[[#sb_v4f:]] = OpTypePointer StorageBuffer %[[#v4f]]
+
+; CHECK: %[[#a:]] = OpAccessChain %[[#sb_S1]] %[[#]] %[[#uint_0]] %[[#uint_0]]
+; CHECK: %[[#b:]] = OpInBoundsAccessChain %[[#sb_arr_v4f]] %[[#a]] %[[#ulong_1]]
+ %4 = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 16
+
+; CHECK: %[[#offset:]] = OpIMul %[[#]] %[[#]] %[[#uint_16]]
+; The offset is computed in bytes. Make sure it is converted back to an element index.
+ %offset = mul i32 %index, 16
+
+; CHECK: %[[#index:]] = OpUDiv %[[#]] %[[#offset]] %[[#uint_16]]
+; CHECK: %[[#c:]] = OpInBoundsAccessChain %[[#sb_v4f]] %[[#b]] %[[#index]]
+ %5 = getelementptr inbounds nuw i8, ptr addrspace(11) %4, i32 %offset
+
+; CHECK: OpLoad %[[#v4f]] %[[#c]]
+ %6 = load <4 x float>, ptr addrspace(11) %5, align 1
+
+ ret <4 x float> %6
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
+
+
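The dynamic case follows the same idea: the IR scales %index by the 16-byte element size, and the backend has to divide that byte offset back down to an element index, hence the OpIMul followed by OpUDiv in the CHECK lines. A trivial sketch of the round trip:

    ELT_SIZE = 16                     # sizeof(<4 x float>) in bytes

    def byte_offset(index):
        return index * ELT_SIZE       # what the scaled GEP encodes

    def recover_index(offset):
        return offset // ELT_SIZE     # what the emitted OpUDiv reconstructs

    assert all(recover_index(byte_offset(i)) == i for i in range(8))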
diff --git a/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll
new file mode 100644
index 0000000..8e6b5a6
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O3 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+; struct S1 {
+; int4 i;
+; float4 f;
+; };
+; struct S2 {
+; float4 f;
+; int4 i;
+; };
+;
+; StructuredBuffer<S1> In : register(t1);
+; RWStructuredBuffer<S2> Out : register(u0);
+;
+; [numthreads(1,1,1)]
+; void main(uint GI : SV_GroupIndex) {
+; Out[GI].f = In[GI].f;
+; Out[GI].i = In[GI].i;
+; }
+
+%struct.S1 = type { <4 x i32>, <4 x float> }
+%struct.S2 = type { <4 x float>, <4 x i32> }
+
+@.str = private unnamed_addr constant [3 x i8] c"In\00", align 1
+@.str.2 = private unnamed_addr constant [4 x i8] c"Out\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+; CHECK-LABEL: main
+; CHECK: %43 = OpFunction %2 None %3 ; -- Begin function main
+; CHECK-NEXT: %1 = OpLabel
+; CHECK-NEXT: %44 = OpVariable %28 Function %38
+; CHECK-NEXT: %45 = OpVariable %27 Function %39
+; CHECK-NEXT: %46 = OpCopyObject %19 %40
+; CHECK-NEXT: %47 = OpCopyObject %16 %41
+; CHECK-NEXT: %48 = OpLoad %4 %42
+; CHECK-NEXT: %49 = OpAccessChain %13 %46 %29 %48
+; CHECK-NEXT: %50 = OpInBoundsAccessChain %9 %49 %31
+; CHECK-NEXT: %51 = OpLoad %8 %50 Aligned 1
+; CHECK-NEXT: %52 = OpAccessChain %11 %47 %29 %48
+; CHECK-NEXT: %53 = OpInBoundsAccessChain %9 %52 %29
+; CHECK-NEXT: OpStore %53 %51 Aligned 1
+; CHECK-NEXT: %54 = OpAccessChain %6 %49 %29
+; CHECK-NEXT: %55 = OpLoad %5 %54 Aligned 1
+; CHECK-NEXT: %56 = OpInBoundsAccessChain %6 %52 %31
+; CHECK-NEXT: OpStore %56 %55 Aligned 1
+; CHECK-NEXT: OpReturn
+; CHECK-NEXT: OpFunctionEnd
+entry:
+ %0 = tail call target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32 0, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %1 = tail call target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %2 = tail call i32 @llvm.spv.flattened.thread.id.in.group()
+ %3 = tail call noundef align 1 dereferenceable(32) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) %0, i32 %2)
+ %f.i = getelementptr inbounds nuw i8, ptr addrspace(11) %3, i64 16
+ %4 = load <4 x float>, ptr addrspace(11) %f.i, align 1
+ %5 = tail call noundef align 1 dereferenceable(32) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) %1, i32 %2)
+ store <4 x float> %4, ptr addrspace(11) %5, align 1
+ %6 = load <4 x i32>, ptr addrspace(11) %3, align 1
+ %i6.i = getelementptr inbounds nuw i8, ptr addrspace(11) %5, i64 16
+ store <4 x i32> %6, ptr addrspace(11) %i6.i, align 1
+ ret void
+}
+
+declare i32 @llvm.spv.flattened.thread.id.in.group()
+
+declare target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(i32, i32, i32, i32, i1, ptr)
+
+declare target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1) @llvm.spv.resource.handlefrombinding.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(i32, i32, i32, i32, i1, ptr)
+
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S2s_12_1t(target("spirv.VulkanBuffer", [0 x %struct.S2], 12, 1), i32)
+
+declare ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.VulkanBuffer_a0s_struct.S1s_12_0t(target("spirv.VulkanBuffer", [0 x %struct.S1], 12, 0), i32)
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
new file mode 100644
index 0000000..8b5efe7
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-target-types.ll
@@ -0,0 +1,104 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpCapability Float16
+; CHECK-DAG: OpCapability ImageBasic
+; CHECK-DAG: OpCapability ImageReadWrite
+; CHECK-DAG: OpCapability Pipes
+; CHECK-DAG: OpCapability DeviceEnqueue
+
+; CHECK-DAG: %[[#VOID:]] = OpTypeVoid
+; CHECK-DAG: %[[#INT:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#HALF:]] = OpTypeFloat 16
+; CHECK-DAG: %[[#FLOAT:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#PIPE_RD:]] = OpTypePipe ReadOnly
+; CHECK-DAG: %[[#PIPE_WR:]] = OpTypePipe WriteOnly
+; CHECK-DAG: %[[#IMG1D_RD:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown ReadOnly
+; CHECK-DAG: %[[#IMG2D_RD:]] = OpTypeImage %[[#INT]] 2D 0 0 0 0
+; CHECK-DAG: %[[#IMG3D_RD:]] = OpTypeImage %[[#INT]] 3D 0 0 0 0
+; CHECK-DAG: %[[#IMG2DA_RD:]] = OpTypeImage %[[#HALF]] 2D 0 1 0 0
+; CHECK-DAG: %[[#IMG2DD_RD:]] = OpTypeImage %[[#FLOAT]] Buffer 0 0 0
+; CHECK-DAG: %[[#IMG1D_WR:]] = OpTypeImage %[[#VOID]] 1D 0 0 0 0 Unknown WriteOnly
+; CHECK-DAG: %[[#IMG2D_RW:]] = OpTypeImage %[[#VOID]] 2D 0 0 0 0 Unknown ReadWrite
+; CHECK-DAG: %[[#IMG1DB_RD:]] = OpTypeImage %[[#FLOAT]] 2D 1 0 0 0
+
+; CHECK-DAG: %[[#DEVEVENT:]] = OpTypeDeviceEvent
+; CHECK-DAG: %[[#EVENT:]] = OpTypeEvent
+; CHECK-DAG: %[[#QUEUE:]] = OpTypeQueue
+; CHECK-DAG: %[[#RESID:]] = OpTypeReserveId
+; CHECK-DAG: %[[#SAMP:]] = OpTypeSampler
+; CHECK-DAG: %[[#SAMPIMG:]] = OpTypeSampledImage %[[#IMG1DB_RD]]
+
+; CHECK-DAG: %[[#]] = OpFunction %[[#VOID]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#PIPE_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG3D_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DA_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2DD_RD]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG1D_WR]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#IMG2D_RW]]
+
+define spir_kernel void @foo(
+ target("spirv.Pipe", 0) %a,
+ target("spirv.Pipe", 1) %b,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 0) %c1,
+ target("spirv.Image", i32, 1, 0, 0, 0, 0, 0, 0) %d1,
+ target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) %e1,
+ target("spirv.Image", half, 1, 0, 1, 0, 0, 0, 0) %f1,
+ target("spirv.Image", float, 5, 0, 0, 0, 0, 0, 0) %g1,
+ target("spirv.Image", void, 0, 0, 0, 0, 0, 0, 1) %c2,
+ target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 2) %d3) #0 !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
+entry:
+ ret void
+}
+
+; CHECK-DAG: %[[#]] = OpFunction
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#DEVEVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#EVENT]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#QUEUE]]
+; CHECK-DAG: %[[#]] = OpFunctionParameter %[[#RESID]]
+
+; CHECK-DAG: %[[#IMARG:]] = OpFunctionParameter %[[#IMG1DB_RD]]
+; CHECK-DAG: %[[#SAMARG:]] = OpFunctionParameter %[[#SAMP]]
+; CHECK-DAG: %[[#SAMPIMVAR:]] = OpSampledImage %[[#SAMPIMG]] %[[#IMARG]] %[[#SAMARG]]
+; CHECK-DAG: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SAMPIMVAR]]
+
+define spir_func void @bar(
+ target("spirv.DeviceEvent") %a,
+ target("spirv.Event") %b,
+ target("spirv.Queue") %c,
+ target("spirv.ReserveId") %d) {
+ ret void
+}
+
+define spir_func void @test_sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce,
+ target("spirv.Sampler") %s.coerce) {
+ %1 = tail call spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0) %srcimg.coerce, target("spirv.Sampler") %s.coerce) #1
+ %2 = tail call spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) %1, <4 x i32> zeroinitializer, i32 2, float 1.000000e+00) #1
+ ret void
+}
+
+declare spir_func target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0) @_Z20__spirv_SampledImagePU3AS1K34__spirv_Image__float_1_1_0_0_0_0_0PU3AS1K15__spirv_Sampler(target("spirv.Image", float, 1, 1, 0, 0, 0, 0, 0), target("spirv.Sampler"))
+
+declare spir_func <4 x float> @_Z38__spirv_ImageSampleExplicitLod_Rfloat4PU3AS120__spirv_SampledImageDv4_iif(target("spirv.SampledImage", float, 1, 1, 0, 0, 0, 0, 0), <4 x i32>, i32, float)
+
+attributes #0 = { nounwind readnone "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!opencl.enable.FP_CONTRACT = !{}
+!opencl.spir.version = !{!6}
+!opencl.ocl.version = !{!7}
+!opencl.used.extensions = !{!8}
+!opencl.used.optional.core.features = !{!9}
+!opencl.compiler.options = !{!8}
+
+!1 = !{i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1}
+!2 = !{!"read_only", !"write_only", !"read_only", !"read_only", !"read_only", !"read_only", !"read_only", !"write_only", !"read_write"}
+!3 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!4 = !{!"int", !"int", !"image1d_t", !"image2d_t", !"image3d_t", !"image2d_array_t", !"image1d_buffer_t", !"image1d_t", !"image2d_t"}
+!5 = !{!"pipe", !"pipe", !"", !"", !"", !"", !"", !"", !""}
+!6 = !{i32 1, i32 2}
+!7 = !{i32 2, i32 0}
+!8 = !{!"cl_khr_fp16"}
+!9 = !{!"cl_images"}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
new file mode 100644
index 0000000..63b2604
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/unused-sret-opaque-ptr.ll
@@ -0,0 +1,19 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#Fun:]] "_Z3booi"
+; CHECK-DAG: OpDecorate %[[#Param:]] FuncParamAttr Sret
+; CHECK-DAG: %[[#PtrTy:]] = OpTypePointer Function %[[#StructTy:]]
+; CHECK-DAG: %[[#StructTy]] = OpTypeStruct
+; CHECK: %[[#Fun]] = OpFunction %[[#]]
+; CHECK: %[[#Param]] = OpFunctionParameter %[[#PtrTy]]
+
+%struct.Example = type { }
+
+define spir_func i32 @foo() {
+ %1 = alloca %struct.Example, align 8
+ call void @_Z3booi(ptr sret(%struct.Example) align 8 %1, i32 noundef 42)
+ ret i32 0
+}
+
+declare void @_Z3booi(ptr sret(%struct.Example) align 8, i32 noundef)
diff --git a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
index 73c7a8d..5835616 100644
--- a/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-mul-07.ll
@@ -3,6 +3,23 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+; Test a v16i8 -> v8i16 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f1_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhb %v0, %v24
+; CHECK-NEXT: vuplhb %v1, %v26
+; CHECK-NEXT: vmlhw %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext1 = zext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %zext2 = zext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %zext1, %zext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 unsigned widening multiplication.
define <8 x i16> @f1(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f1:
@@ -31,6 +48,23 @@ define <8 x i16> @f2(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v16i8 -> v8i16 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <8 x i16> @f3_not(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphb %v0, %v26
+; CHECK-NEXT: vuphb %v1, %v24
+; CHECK-NEXT: vmlhw %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <16 x i8> %val1, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext1 = sext <8 x i8> %shuf1 to <8 x i16>
+ %shuf2 = shufflevector <16 x i8> %val2, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %sext2 = sext <8 x i8> %shuf2 to <8 x i16>
+ %ret = mul <8 x i16> %sext1, %sext2
+ ret <8 x i16> %ret
+}
+
; Test a v16i8 (even) -> v8i16 signed widening multiplication.
define <8 x i16> @f3(<16 x i8> %val1, <16 x i8> %val2) {
; CHECK-LABEL: f3:
@@ -59,6 +93,23 @@ define <8 x i16> @f4(<16 x i8> %val1, <16 x i8> %val2) {
ret <8 x i16> %ret
}
+; Test a v8i16 -> v4i32 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f5_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f5_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhh %v0, %v24
+; CHECK-NEXT: vuplhh %v1, %v26
+; CHECK-NEXT: vmlf %v24, %v0, %v1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext1 = zext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %zext2 = zext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %zext1, %zext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 unsigned widening multiplication.
define <4 x i32> @f5(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f5:
@@ -87,6 +138,23 @@ define <4 x i32> @f6(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v8i16 -> v4i32 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <4 x i32> @f7_not(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphh %v0, %v26
+; CHECK-NEXT: vuphh %v1, %v24
+; CHECK-NEXT: vmlf %v24, %v1, %v0
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <8 x i16> %val1, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext1 = sext <4 x i16> %shuf1 to <4 x i32>
+ %shuf2 = shufflevector <8 x i16> %val2, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %sext2 = sext <4 x i16> %shuf2 to <4 x i32>
+ %ret = mul <4 x i32> %sext1, %sext2
+ ret <4 x i32> %ret
+}
+
; Test a v8i16 (even) -> v4i32 signed widening multiplication.
define <4 x i32> @f7(<8 x i16> %val1, <8 x i16> %val2) {
; CHECK-LABEL: f7:
@@ -115,6 +183,29 @@ define <4 x i32> @f8(<8 x i16> %val1, <8 x i16> %val2) {
ret <4 x i32> %ret
}
+; Test a v4i32 -> v2i64 unsigned widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f9_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f9_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuplhf %v0, %v24
+; CHECK-NEXT: vuplhf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext1 = zext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %zext2 = zext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %zext1, %zext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 unsigned widening multiplication.
define <2 x i64> @f9(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f9:
@@ -143,6 +234,29 @@ define <2 x i64> @f10(<4 x i32> %val1, <4 x i32> %val2) {
ret <2 x i64> %ret
}
+; Test a v4i32 -> v2i64 signed widening multiplication
+; which is not folded into an even/odd widening operation.
+define <2 x i64> @f11_not(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f11_not:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vuphf %v0, %v24
+; CHECK-NEXT: vuphf %v1, %v26
+; CHECK-NEXT: vlgvg %r0, %v1, 1
+; CHECK-NEXT: vlgvg %r1, %v0, 1
+; CHECK-NEXT: msgr %r1, %r0
+; CHECK-NEXT: vlgvg %r0, %v1, 0
+; CHECK-NEXT: vlgvg %r2, %v0, 0
+; CHECK-NEXT: msgr %r2, %r0
+; CHECK-NEXT: vlvgp %v24, %r2, %r1
+; CHECK-NEXT: br %r14
+ %shuf1 = shufflevector <4 x i32> %val1, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext1 = sext <2 x i32> %shuf1 to <2 x i64>
+ %shuf2 = shufflevector <4 x i32> %val2, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %sext2 = sext <2 x i32> %shuf2 to <2 x i64>
+ %ret = mul <2 x i64> %sext1, %sext2
+ ret <2 x i64> %ret
+}
+
; Test a v4i32 (even) -> v2i64 signed widening multiplication.
define <2 x i64> @f11(<4 x i32> %val1, <4 x i32> %val2) {
; CHECK-LABEL: f11:
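The *_not tests above hinge on which source elements feed the widening multiply: vmle*/vmlo* consume the even- or odd-numbered elements of both inputs, while these tests extract the low half (elements 0..7), so no even/odd folding applies. A small Python model of the two selections, unsigned case only and with 0-based element numbering assumed:

    def widen_mul_even(a, b):
        # vmle*-style: even-numbered elements of both inputs, widened.
        return [a[i] * b[i] for i in range(0, len(a), 2)]

    def widen_mul_low_half(a, b):
        # The f*_not pattern: low half of both inputs, widened.
        return [a[i] * b[i] for i in range(len(a) // 2)]

    a, b = list(range(16)), list(range(16, 32))
    print(widen_mul_even(a, b))      # uses elements 0, 2, 4, ..., 14
    print(widen_mul_low_half(a, b))  # uses elements 0, 1, 2, ..., 7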
diff --git a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
index 60cfc27..4a4973b 100644
--- a/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
+++ b/llvm/test/CodeGen/WebAssembly/target-features-cpus.ll
@@ -68,9 +68,9 @@ target triple = "wasm32-unknown-unknown"
; bleeding-edge: +atomics, +bulk-memory, +bulk-memory-opt,
; +call-indirect-overlong, +exception-handling,
-; +extended-const, +fp16, +multimemory, +multivalue,
+; +extended-const, +fp16, +gc, +multimemory, +multivalue,
; +mutable-globals, +nontrapping-fptoint, +relaxed-simd,
-; +reference-types, +simd128, +sign-ext, +tail-call, +gc
+; +reference-types, +simd128, +sign-ext, +tail-call
; BLEEDING-EDGE-LABEL: .section .custom_section.target_features,"",@
; BLEEDING-EDGE-NEXT: .int8 17
; BLEEDING-EDGE-NEXT: .int8 43
diff --git a/llvm/test/CodeGen/X86/apx/cf.ll b/llvm/test/CodeGen/X86/apx/cf.ll
index b111ae5..e52ce6c 100644
--- a/llvm/test/CodeGen/X86/apx/cf.ll
+++ b/llvm/test/CodeGen/X86/apx/cf.ll
@@ -194,3 +194,38 @@ entry:
call void @llvm.masked.store.v1i64.p0(<1 x i64> %3, ptr %p, i32 4, <1 x i1> %0)
ret void
}
+
+define void @sink_gep(ptr %p, i1 %cond) {
+; CHECK-LABEL: sink_gep:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovnel %eax, 112(%rdi)
+; CHECK-NEXT: cfcmovnel 112(%rdi), %eax
+; CHECK-NEXT: movl %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = getelementptr i8, ptr %p, i64 112
+ br label %next
+
+next:
+ %1 = bitcast i1 %cond to <1 x i1>
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %0, i32 1, <1 x i1> %1)
+ %2 = call <1 x i32> @llvm.masked.load.v1i32.p0(ptr %0, i32 1, <1 x i1> %1, <1 x i32> zeroinitializer)
+ store <1 x i32> %2, ptr %p, align 4
+ ret void
+}
+
+define void @xor_cond(ptr %p, i1 %cond) {
+; CHECK-LABEL: xor_cond:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: testb $1, %sil
+; CHECK-NEXT: cfcmovel %eax, (%rdi)
+; CHECK-NEXT: retq
+entry:
+ %0 = xor i1 %cond, true
+ %1 = insertelement <1 x i1> zeroinitializer, i1 %0, i64 0
+ call void @llvm.masked.store.v1i32.p0(<1 x i32> zeroinitializer, ptr %p, i32 1, <1 x i1> %1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
new file mode 100644
index 0000000..1136287
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
@@ -0,0 +1,43 @@
+;; Test that temporary labels are generated for each indirect callsite with callee_type metadata.
+;; Test that the .callgraph section contains the callee type ids computed as MD5
+;; hashes of the generalized type id strings.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -o - < %s | FileCheck %s
+
+; CHECK: ball:
+; CHECK-NEXT: [[LABEL_FUNC:\.Lfunc_begin[0-9]+]]:
+define ptr @ball() {
+entry:
+ %fp_foo_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP0:\.L.*]]:
+ call void (...) %fp_foo_val(), !callee_type !0
+ %fp_bar_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP1:\.L.*]]:
+ %call_fp_bar = call i32 %fp_bar_val(i8 0), !callee_type !2
+ %fp_baz_val = load ptr, ptr null, align 8
+ ; CHECK: [[LABEL_TMP2:\.L.*]]:
+ %call_fp_baz = call ptr %fp_baz_val(ptr null), !callee_type !4
+ ret ptr %call_fp_baz
+}
+
+; CHECK: .section .callgraph,"o",@progbits,.text
+
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .quad [[LABEL_FUNC]]
+; CHECK-NEXT: .quad 1
+; CHECK-NEXT: .quad 3
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvE.generalized"}
+;; Test for MD5 hash of _ZTSFvE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 4524972987496481828
+; CHECK-NEXT: .quad [[LABEL_TMP0]]
+!2 = !{!3}
+!3 = !{i64 0, !"_ZTSFicE.generalized"}
+;; Test for MD5 hash of _ZTSFicE.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 3498816979441845844
+; CHECK-NEXT: .quad [[LABEL_TMP1]]
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
+;; Test for MD5 hash of _ZTSFPvS_E.generalized and the generated temporary callsite label.
+; CHECK-NEXT: .quad 8646233951371320954
+; CHECK-NEXT: .quad [[LABEL_TMP2]]
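The .quad constants above can be reproduced outside LLVM. A sketch assuming the type id is llvm::MD5Hash of the generalized type string, i.e. the first 8 bytes of the MD5 digest read as a little-endian u64; that reading of MD5Hash is an assumption stated here, not verified against the checked values:

    import hashlib

    def type_id(type_str):
        # Low 64 bits of the MD5 digest, little-endian.
        return int.from_bytes(hashlib.md5(type_str.encode()).digest()[:8], "little")

    for s in ("_ZTSFvE.generalized", "_ZTSFicE.generalized", "_ZTSFPvS_E.generalized"):
        print(s, type_id(s))  # expected to match the .quad values checked above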
diff --git a/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
new file mode 100644
index 0000000..fa14a98
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section-tailcall.ll
@@ -0,0 +1,34 @@
+;; Tests that we store the type identifiers in the .callgraph section of the object file for tail calls.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+define i32 @main(i32 %argc) !type !3 {
+entry:
+ %andop = and i32 %argc, 1
+ %cmp = icmp eq i32 %andop, 0
+ %foo.bar = select i1 %cmp, ptr @foo, ptr @bar
+ %call.i = tail call i32 %foo.bar(i8 signext 97), !callee_type !1
+ ret i32 %call.i
+}
+
+declare !type !2 i32 @foo(i8 signext)
+
+declare !type !2 i32 @bar(i8 signext)
+
+;; Check that the numeric type ids (MD5 hashes) for the below type ids are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{i64 0, !"_ZTSFiiE.generalized"}
diff --git a/llvm/test/CodeGen/X86/call-graph-section.ll b/llvm/test/CodeGen/X86/call-graph-section.ll
new file mode 100644
index 0000000..4a9840e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/call-graph-section.ll
@@ -0,0 +1,38 @@
+;; Tests that we store the type identifiers in the .callgraph section of the object file.
+
+; RUN: llc -mtriple=x86_64-unknown-linux --call-graph-section -filetype=obj -o - < %s | \
+; RUN: llvm-readelf -x .callgraph - | FileCheck %s
+
+declare !type !0 void @foo()
+
+declare !type !1 i32 @bar(i8)
+
+declare !type !2 ptr @baz(ptr)
+
+define void @main() {
+entry:
+ %a = alloca i8, align 1
+ %fp_foo_val = load ptr, ptr null, align 8
+ call void (...) %fp_foo_val(), !callee_type !1
+ %fp_bar_val = load ptr, ptr null, align 8
+ %param = trunc i64 0 to i8
+ %call_fp_bar = call i32 %fp_bar_val(i8 signext %param), !callee_type !3
+ %fp_baz_val = load ptr, ptr null, align 8
+ %call_fp_baz = call ptr %fp_baz_val(ptr %a), !callee_type !4
+ ret void
+}
+
+;; Check that the numeric type ids (MD5 hashes) for the below type ids are emitted
+;; to the callgraph section.
+
+; CHECK: Hex dump of section '.callgraph':
+
+; CHECK-DAG: 2444f731 f5eecb3e
+!0 = !{i64 0, !"_ZTSFvE.generalized"}
+!1 = !{!0}
+; CHECK-DAG: 5486bc59 814b8e30
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
+!3 = !{!2}
+; CHECK-DAG: 7ade6814 f897fd77
+!4 = !{!5}
+!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
diff --git a/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
new file mode 100644
index 0000000..7881ea7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/calleetypeid-directcall-mismatched.ll
@@ -0,0 +1,32 @@
+;; Tests that callee_type metadata attached to direct call sites is safely ignored.
+
+; RUN: llc --call-graph-section -mtriple x86_64-linux-gnu < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+;; Test that `calleeTypeIds` field is not present in `callSites`
+; CHECK-LABEL: callSites:
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+; CHECK-NEXT: - { bb: {{[0-9]+}}, offset: {{[0-9]+}}, fwdArgRegs: [] }
+define i32 @foo(i32 %x, i32 %y) !type !0 {
+entry:
+ ;; Call instruction with accurate callee_type.
+ ;; callee_type should be dropped seamlessly.
+ %call = call i32 @fizz(i32 %x, i32 %y), !callee_type !1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call1 = call i32 @fizz(i32 %x, i32 %y), !callee_type !3
+ %add = add nsw i32 %call, %call1
+ ;; Call instruction with mismatched callee_type.
+ ;; callee_type should be dropped seamlessly without errors.
+ %call2 = call i32 @fizz(i32 %add, i32 %y), !callee_type !3
+ %sub = sub nsw i32 %add, %call2
+ ret i32 %sub
+}
+
+declare !type !2 i32 @fizz(i32, i32)
+
+!0 = !{i64 0, !"_ZTSFiiiiE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFiiiE.generalized"}
+!3 = !{!4}
+!4 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
new file mode 100644
index 0000000..8f6b7a6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid-tailcall.ll
@@ -0,0 +1,19 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata for indirect tail calls.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+define i32 @check_tailcall(ptr %func, i8 %x) !type !0 {
+entry:
+ ; CHECK: callSites:
+ ; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+ ; CHECK-NEXT: [ 3498816979441845844 ] }
+ %call = tail call i32 %func(i8 signext %x), !callee_type !1
+ ret i32 %call
+}
+
+!0 = !{i64 0, !"_ZTSFiPvcE.generalized"}
+!1 = !{!2}
+!2 = !{i64 0, !"_ZTSFicE.generalized"}
diff --git a/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
new file mode 100644
index 0000000..e97a6ac
--- /dev/null
+++ b/llvm/test/CodeGen/X86/callsite-emit-calleetypeid.ll
@@ -0,0 +1,20 @@
+;; Tests that call site callee type ids can be extracted and set from
+;; callee_type metadata.
+
+;; Verify the exact calleeTypeIds value to ensure it is not garbage but the value
+;; computed as the type id from the callee_type metadata.
+; RUN: llc --call-graph-section -mtriple=x86_64-unknown-linux < %s -stop-after=finalize-isel -o - | FileCheck --match-full-lines %s
+
+; CHECK: name: main
+; CHECK: callSites:
+; CHECK-NEXT: - { bb: {{.*}}, offset: {{.*}}, fwdArgRegs: [], calleeTypeIds:
+; CHECK-NEXT: [ 7854600665770582568 ] }
+define i32 @main() {
+entry:
+ %fn = load ptr, ptr null, align 8
+ call void %fn(i8 0), !callee_type !0
+ ret i32 0
+}
+
+!0 = !{!1}
+!1 = !{i64 0, !"_ZTSFvcE.generalized"}
diff --git a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
index 17de405..0f28964 100644
--- a/llvm/test/CodeGen/X86/tail-dup-computed-goto.mir
+++ b/llvm/test/CodeGen/X86/early-tail-dup-computed-goto.mir
@@ -1,6 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=early-tailduplication -tail-dup-pred-size=1 -tail-dup-succ-size=1 %s -o - | FileCheck %s
-# Check that only the computed goto is not be restrict by tail-dup-pred-size and tail-dup-succ-size.
+#
+# Check that the computed goto, like all other branches, is restricted by tail-dup-pred-size and tail-dup-succ-size.
+#
--- |
@computed_goto.dispatch = constant [5 x ptr] [ptr null, ptr blockaddress(@computed_goto, %bb1), ptr blockaddress(@computed_goto, %bb2), ptr blockaddress(@computed_goto, %bb3), ptr blockaddress(@computed_goto, %bb4)]
declare i64 @f0()
@@ -30,54 +32,54 @@ tracksRegLiveness: true
body: |
; CHECK-LABEL: name: computed_goto
; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64_nosp = COPY [[COPY2]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gr64_nosp = COPY [[COPY4]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY4]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:gr64_nosp = COPY [[COPY6]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY6]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: JMP_1 %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
- ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
; CHECK-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- ; CHECK-NEXT: [[COPY8:%[0-9]+]]:gr64_nosp = COPY $rax
- ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gr64_nosp = COPY [[COPY8]]
- ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY8]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr64_nosp = PHI [[COPY]], %bb.0, [[COPY4]], %bb.4, [[COPY3]], %bb.3, [[COPY2]], %bb.2, [[COPY1]], %bb.1
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[PHI]], @computed_goto.dispatch, $noreg
bb.0:
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
index ca1153a..bdada7d 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/X86/globals.ll
@@ -1,12 +1,5 @@
; RUN: opt < %s -S -passes=hwasan -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
-; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
-; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
-
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
-
-; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
-
; CHECK: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\10" }, align 16
; CHECK: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 268435460 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
@@ -17,14 +10,21 @@
; CHECK: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 318767088 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 301989920 }, section "hwasan_globals", !associated [[HUGE]]
+; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
+; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
+
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+
+; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+
; CHECK: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 2305843009213693952) to ptr)
; CHECK: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 2449958197289549824) to ptr)
; CHECK: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 2594073385365405696) to ptr)
-; CHECK: [[NOTE]] = !{ptr @hwasan.note}
; CHECK: [[FOUR]] = !{ptr @four.hwasan}
; CHECK: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
; CHECK: [[HUGE]] = !{ptr @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
source_filename = "foo"
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll b/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
index f5ae1c0..4c28523 100644
--- a/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/globals.ll
@@ -1,16 +1,11 @@
-; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android29 | FileCheck --check-prefixes=CHECK,CHECK29 %s
-; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android30 | FileCheck --check-prefixes=CHECK,CHECK30 %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android29 | FileCheck --check-prefixes=CHECK,CHECK29,NOALLGLOBALS %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=aarch64--linux-android30 | FileCheck --check-prefixes=CHECK,CHECK30,NOALLGLOBALS %s
+; RUN: opt < %s -S -passes=hwasan -mtriple=riscv64-unknown-elf -hwasan-globals=1 -hwasan-all-globals=1 | FileCheck --check-prefixes=CHECK,CHECK30,ALLGLOBALS %s
; CHECK29: @four = global
; CHECK: @specialcaselisted = global i16 2, no_sanitize_hwaddress
-; CHECK: @insection = global i16 2, section "custom"
-; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
-; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
-
-; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
-
-; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+; NOALLGLOBALS: @insection = global i16 2, section "custom"
; CHECK30: @four.hwasan = private global { i32, [12 x i8] } { i32 1, [12 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\AC" }, align 16
; CHECK30: @four.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @four.hwasan to i64), i64 ptrtoint (ptr @four.hwasan.descriptor to i64)) to i32), i32 -1409286140 }, section "hwasan_globals", !associated [[FOUR:![0-9]+]]
@@ -22,14 +17,23 @@
; CHECK30: @huge.hwasan.descriptor = private constant { i32, i32 } { i32 trunc (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor to i64)) to i32), i32 -1358954512 }, section "hwasan_globals", !associated [[HUGE:![0-9]+]]
; CHECK30: @huge.hwasan.descriptor.1 = private constant { i32, i32 } { i32 trunc (i64 add (i64 sub (i64 ptrtoint (ptr @huge.hwasan to i64), i64 ptrtoint (ptr @huge.hwasan.descriptor.1 to i64)), i64 16777200) to i32), i32 -1375731680 }, section "hwasan_globals", !associated [[HUGE]]
+; ALLGLOBALS: @insection.hwasan = private global { i16, [14 x i8] } { i16 2, [14 x i8] c"\00\00\00\00\00\00\00\00\00\00\00\00\00\AF" }, section "custom", align 16
+
+; CHECK: @__start_hwasan_globals = external hidden constant [0 x i8]
+; CHECK: @__stop_hwasan_globals = external hidden constant [0 x i8]
+
+; CHECK: @hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+
+; CHECK: @hwasan.dummy.global = private constant [0 x i8] zeroinitializer, section "hwasan_globals", comdat($hwasan.module_ctor), !associated [[NOTE:![0-9]+]]
+
; CHECK30: @four = alias i32, inttoptr (i64 add (i64 ptrtoint (ptr @four.hwasan to i64), i64 -6052837899185946624) to ptr)
; CHECK30: @sixteen = alias [16 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @sixteen.hwasan to i64), i64 -5980780305148018688) to ptr)
; CHECK30: @huge = alias [16777232 x i8], inttoptr (i64 add (i64 ptrtoint (ptr @huge.hwasan to i64), i64 -5908722711110090752) to ptr)
-; CHECK: [[NOTE]] = !{ptr @hwasan.note}
; CHECK30: [[FOUR]] = !{ptr @four.hwasan}
; CHECK30: [[SIXTEEN]] = !{ptr @sixteen.hwasan}
; CHECK30: [[HUGE]] = !{ptr @huge.hwasan}
+; CHECK: [[NOTE]] = !{ptr @hwasan.note}
source_filename = "foo"
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
index bc0f586..f586e4a 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop2_fake16_err.s
@@ -173,6 +173,21 @@ v_mul_f16_e32 v5, v1, v255
v_mul_f16_e32 v5, v255, v2
// GFX11: :[[@LINE-1]]:1: error: operands are not valid for this GPU or mode
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX11: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
v_sub_f16_dpp v255, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX11: :[[@LINE-1]]:1: error: operands are not valid for this GPU or mode
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
index 04c55ee..1f40a32 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3-fake16.s
@@ -217,6 +217,66 @@ v_mad_nc_i64_i32 v[2:3], v4, v7, 12345
v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp
// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+v_add_min_i32 v2, s4, v7, v8
+// GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, v4, 0, 1
+// GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_i32 v2, v4, 3, s2
+// GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_i32 v2, s4, 4, v2
+// GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_i32 v2, v4, v7, 12345
+// GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_i32 v2, s4, v7, v8
+// GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_i32 v2, v4, 0, 1
+// GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_i32 v2, v4, 3, s2
+// GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_i32 v2, s4, 4, v2
+// GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_i32 v2, v4, v7, 12345
+// GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_min_u32 v2, s4, v7, v8
+// GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_u32 v2, v4, 0, 1
+// GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_u32 v2, v4, 3, s2
+// GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_u32 v2, s4, 4, v2
+// GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_u32 v2, v4, v7, 12345
+// GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_u32 v2, s4, v7, v8
+// GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_u32 v2, v4, 0, 1
+// GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_u32 v2, v4, 3, s2
+// GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_u32 v2, s4, 4, v2
+// GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_u32 v2, v4, v7, 12345
+// GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
v_cvt_pk_bf16_f32 v5, v1, v2
// GFX1250: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6d,0xd7,0x01,0x05,0x02,0x00]
@@ -261,3 +321,448 @@ v_cvt_pk_bf16_f32 v5, src_scc, vcc_lo mul:4
v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
// GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_ashr_pk_i8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_i8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_i8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+v_ashr_pk_u8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_u8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_u8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,0]
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1]
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, v2 clamp
+// GFX1250: v_cvt_pk_bf8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, s2
+// GFX1250: v_cvt_pk_bf8_f16 v1, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 100.0
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_bf8_f16
+
+v_cvt_pk_bf8_f16 v1, 1
+// GFX1250: v_cvt_pk_bf8_f16 v1, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0x3800
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0.5
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0x3118
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1, 0.15915494
+// GFX1250: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,0]
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1]
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, v2 clamp
+// GFX1250: v_cvt_pk_fp8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, s2
+// GFX1250: v_cvt_pk_fp8_f16 v1, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 100.0
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_fp8_f16
+
+v_cvt_pk_fp8_f16 v1, 1
+// GFX1250: v_cvt_pk_fp8_f16 v1, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0x3800
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0.5
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0x3118
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1, 0.15915494
+// GFX1250: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, v1, v2
+// GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_f16_f32 v5, v255, v255
+// GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_f16_f32 v5, s1, s2
+// GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, s105, s105
+// GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_f16_f32 v5, ttmp15, src_scc
+// GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, m0, 0.5
+// GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_lo, -1
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_hi, null
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, null, exec_lo
+// GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, -1, exec_hi
+// GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2
+// GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4
+// GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_f16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_bf8_f16 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_bf8_f16 v1, |v2|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_fp8_f16 v1, |v2|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1]
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1]
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_fp8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_bf8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
index ebfeb3f..03f642d 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3.s
@@ -217,6 +217,66 @@ v_mad_nc_i64_i32 v[2:3], v4, v7, 12345
v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp
// GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+v_add_min_i32 v2, s4, v7, v8
+// GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_i32 v2, v4, 0, 1
+// GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_i32 v2, v4, 3, s2
+// GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_i32 v2, s4, 4, v2
+// GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_i32 v2, v4, v7, 12345
+// GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_i32 v2, s4, v7, v8
+// GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_i32 v2, v4, 0, 1
+// GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_i32 v2, v4, 3, s2
+// GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_i32 v2, s4, 4, v2
+// GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_i32 v2, v4, v7, 12345
+// GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_min_u32 v2, s4, v7, v8
+// GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_min_u32 v2, v4, 0, 1
+// GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_min_u32 v2, v4, 3, s2
+// GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_min_u32 v2, s4, 4, v2
+// GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_min_u32 v2, v4, v7, 12345
+// GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_add_max_u32 v2, s4, v7, v8
+// GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+v_add_max_u32 v2, v4, 0, 1
+// GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+v_add_max_u32 v2, v4, 3, s2
+// GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+v_add_max_u32 v2, s4, 4, v2
+// GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+v_add_max_u32 v2, v4, v7, 12345
+// GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
v_cvt_pk_bf16_f32 v5, v1, v2
// GFX1250: v_cvt_pk_bf16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6d,0xd7,0x01,0x05,0x02,0x00]
@@ -261,3 +321,448 @@ v_cvt_pk_bf16_f32 v5, src_scc, vcc_lo mul:4
v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
// GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_ashr_pk_i8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_i8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_i8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_i8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+v_ashr_pk_u8_i32 v2, s4, v7, v8
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, 0, 1
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+v_ashr_pk_u8_i32 v2, v4, 3, s2
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+v_ashr_pk_u8_i32 v2, s4, 4, v2
+// GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+v_ashr_pk_u8_i32 v2, v4, v7, 12345
+// GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1]
+// GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+v_cvt_pk_bf8_f16 v1.l, v2
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.h, v2
+// GFX1250: v_cvt_pk_bf8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v0.l, v2 clamp
+// GFX1250: v_cvt_pk_bf8_f16 v0.l, v2 clamp ; encoding: [0x00,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, s2
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 100.0
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_bf8_f16
+
+v_cvt_pk_bf8_f16 v1.l, 1
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0x3800
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0.5
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0x3118
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_bf8_f16 v1.l, 0.15915494
+// GFX1250: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, v2
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.h, v2
+// GFX1250: v_cvt_pk_fp8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, v2 clamp
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, s2
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 100.0
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+// Inline constants are not supported by v_cvt_pk_fp8_f16
+
+v_cvt_pk_fp8_f16 v1.l, 1
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0x3800
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0.5
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0x3118
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_fp8_f16 v1.l, 0.15915494
+// GFX1250: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, v1, v2
+// GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+v_cvt_pk_f16_f32 v5, v255, v255
+// GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+v_cvt_pk_f16_f32 v5, s1, s2
+// GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, s105, s105
+// GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456
+// GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cvt_pk_f16_f32 v5, ttmp15, src_scc
+// GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, m0, 0.5
+// GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_lo, -1
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+v_cvt_pk_f16_f32 v5, exec_hi, null
+// GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, null, exec_lo
+// GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, -1, exec_hi
+// GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2
+// GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4
+// GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2
+// GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, v1, v2, s3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+v_cvt_sr_pk_f16_f32 v5, v255, s2, s105
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4
+// GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2
+// GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:0
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_bf8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_bf8_f16 v1, |v2.l|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, |v2.h|, v3
+// GFX1250: v_cvt_sr_bf8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:1
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:2
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3
+// GFX1250: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, s3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2, 0x1234
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+v_cvt_sr_fp8_f16 v1, -v2, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_sr_fp8_f16 v1, |v2.l|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, |v2.h|, v3
+// GFX1250: v_cvt_sr_fp8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:1
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:2
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3
+// GFX1250: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.l, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.h, v2, v3
+// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_fp8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_fp8_f32 v1.h, v2, v3 clamp
+// GFX1250: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_pk_bf8_f32 v1, -v2, |v3|
+// GFX1250: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+v_cvt_pk_bf8_f32 v1, s2, 3
+// GFX1250: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+v_cvt_sr_fp8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_fp8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_sr_fp8_f32 v1, v2, v3 clamp
+// GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v1, v2, v3
+// GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v10, s2, v5
+// GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+v_cvt_sr_bf8_f32 v5, -|v255|, v4
+// GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5
+// GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2
+// GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3
+// GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6
+// GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7
+// GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1
+// GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
index d9c7645..a926f7e 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16-fake16.s
@@ -146,6 +146,54 @@ v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:102 op_sel:[1,1,1,1] quad_perm:[0,1,2
// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -201,3 +249,263 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:40: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:61: error: not a valid operand.
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,0] quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,0] quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
index ccf50b2..f766e52 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp16.s
@@ -146,6 +146,54 @@ v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:102 quad_perm:[0,1,2,3]
// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x7c,0x33,0xd6,0xfa,0x04,0x0e,0xcc,0x01,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -201,3 +249,263 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:42: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:42: error: invalid operand for instruction
+
+v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+// GFX12-ERR: :[[@LINE-2]]:38: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 quad_perm:[1,2,3,1]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, v8 row_share:3 fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1.l, v2 quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1.l, v2 quad_perm:[1,2,3,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 row_share:0 row_mask:0x5 bank_mask:0x3 fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 quad_perm:[0,1,2,3] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3 quad_perm:[0,1,2,3]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
index 40d27c8..3b864b9 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8-fake16.s
@@ -122,6 +122,38 @@ v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:102 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,
// GFX1250: v_bitop3_b16_e64_dpp v5, v1, v2, v3 bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -137,3 +169,191 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:58: error: not a valid operand.
+
+v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
index fb5593d..c726a0d 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_dpp8.s
@@ -122,6 +122,38 @@ v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:102 dpp8:[0,0,0,0,0,0,0,0]
// GFX1250: v_bitop3_b16_e64_dpp v5.h, v1.h, v2.h, v3.h bitop3:0x66 op_sel:[1,1,1,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0x05,0x7c,0x33,0xd6,0xe9,0x04,0x0e,0xcc,0x01,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_add_min_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_i32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_min_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_add_max_u32 v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6d,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -137,3 +169,191 @@ v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction
+
+v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:39: error: invalid operand for instruction
+
+v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1]
+// GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+// GFX12-ERR: :[[@LINE-2]]:37: error: invalid operand for instruction
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_i8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_ashr_pk_u8_i32 v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16 v1.l, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_bf8_f16 v1, v2.h, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+
+v_cvt_sr_fp8_f16 v1, v2.h, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0]
+// GFX1250: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
index 7e29d04..c5bd00c 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
@@ -86,3 +86,78 @@ v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
// GFX125X-ERR-NEXT:{{^}}v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashr_pk_u8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_u8_i32 v1, v2, v3, v4 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_fp8_f16 v1, v2, v3 clamp
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_fp8_f16 v1, v2, v3 clamp
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_fp8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_fp8_f16 v1, v2, v3 mul:2
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid scale_sel value.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:4
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid byte_sel value.
+// GFX125X-ERR-NEXT:{{^}}v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:4
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp8 v[10:13], 1, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp8 v[10:13], 1, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_bf16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_bf16_fp8 v[10:13], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp8 v[10:17], s[20:21], v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp8 v[10:17], s[20:21], v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f16_fp4 v[10:13], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f16_fp4 v[10:13], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_bf16_fp4 v[10:13], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_bf16_fp4 v[10:13], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
+// GFX125X-ERR-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s
new file mode 100644
index 0000000..b7d93e1
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop2_err.s
@@ -0,0 +1,20 @@
+// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --unique --sort --version 5
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 %s 2>&1 | FileCheck --check-prefix=GFX12 --implicit-check-not=error: %s
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16 v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: :[[@LINE-1]]:26: error: not a valid operand.
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0]
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v0, v1, v2 quad_perm:[1,2,3,0] row_mask:0x0 bank_mask:0x0
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
+
+v_pk_fmac_f16_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: :[[@LINE-1]]:1: error: dpp variant of this instruction is not supported
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
index 9fd7edd..ce8cfcb 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3.txt
@@ -236,6 +236,66 @@
0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04
# GFX1250: v_mad_nc_i64_i32 v[2:3], s4, v7, v[8:9] clamp ; encoding: [0x02,0x80,0xfb,0xd6,0x04,0x0e,0x22,0x04]
+0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_min_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_min_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_min_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_min_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_min_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x60,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_max_i32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_max_i32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_max_i32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_max_i32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_max_i32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5e,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_min_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_min_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_min_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_min_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_min_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x61,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_add_max_u32_e64 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_add_max_u32_e64 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_add_max_u32_e64 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_add_max_u32_e64 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_add_max_u32_e64 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x5f,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf
# GFX1250: v_cvt_pk_bf16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6d,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
@@ -281,6 +341,482 @@
0x05,0x00,0x6d,0xd7,0x6a,0xf6,0x00,0x00
# GFX1250: v_cvt_pk_bf16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6d,0xd7,0x6a,0xf6,0x00,0x00]
-## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-# GFX1250-FAKE16: {{.*}}
-# GFX1250-REAL16: {{.*}}
+0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6e,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x6e,0xd7,0xc1,0xfe,0xf4,0x43]
+
+0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6e,0xd7,0xfd,0xd4,0x04,0x33]
+
+0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x6e,0xd7,0x7f,0xf8,0xa8,0x21]
+
+0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x6e,0xd7,0x7b,0xfa,0xed,0x61]
+
+0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xfc,0x4b,0x00,0x38,0x00,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, 0.5, -m0, 0x3800 mul:2 ; encoding: [0x05,0x00,0x6e,0xd7,0xf0,0xfa,0xfc,0x4b,0x00,0x38,0x00,0x00]
+
+0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6e,0xd7,0x7d,0xe0,0xf5,0x01]
+
+0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x6e,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0xfe,0xff,0x01]
+
+0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6e,0xd7,0x69,0xd2,0xf8,0x01]
+
+0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6e,0xd7,0x01,0x05,0x0e,0x00]
+
+0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6e,0xd7,0xff,0x05,0xa4,0x01]
+
+0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x6e,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6e,0xd7,0x6a,0xf6,0x0c,0x04]
+
+0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6e,0xd7,0x7e,0x82,0xad,0x01]
+
+0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_ashr_pk_i8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_ashr_pk_i8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_ashr_pk_i8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x90,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04
+# GFX1250: v_ashr_pk_i8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x90,0xd6,0x02,0x07,0x12,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04
+# GFX1250: v_ashr_pk_u8_i32 v2, s4, 4, v2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x08,0x09,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04
+# GFX1250: v_ashr_pk_u8_i32 v2, s4, v7, v8 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0e,0x22,0x04]
+
+0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, 0, 1 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x01,0x05,0x02]
+
+0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, 3, s2 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x07,0x09,0x00]
+
+0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00
+# GFX1250: v_ashr_pk_u8_i32 v2, v4, v7, 0x3039 ; encoding: [0x02,0x00,0x91,0xd6,0x04,0x0f,0xfe,0x03,0x39,0x30,0x00,0x00]
+
+0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04
+# GFX1250: v_ashr_pk_u8_i32 v1, v2, v3, v4 op_sel:[0,0,0,1] ; encoding: [0x01,0x40,0x91,0xd6,0x02,0x07,0x12,0x04]
+
+0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x73,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, s2 ; encoding: [0x01,0x00,0x73,0xd7,0x02,0x00,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 1 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x73,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.h, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 op_sel:[0,1] ; encoding: [0x01,0x40,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, v2 clamp ; encoding: [0x01,0x80,0x72,0xd7,0x02,0x01,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, s2 ; encoding: [0x01,0x00,0x72,0xd7,0x02,0x00,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x5640 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x40,0x56,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 1 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x3800 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f16 v1.l, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16 v1, 0x3118 ; encoding: [0x01,0x00,0x72,0xd7,0xff,0x00,0x00,0x00,0x18,0x31,0x00,0x00]
+
+0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_pk_f16_f32 v255, -|0xaf123456|, vcc_hi clamp div:2 ; encoding: [0xff,0x81,0x6f,0xd7,0xff,0xd6,0x00,0x38,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, -1, exec_hi ; encoding: [0x05,0x00,0x6f,0xd7,0xc1,0xfe,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08
+# GFX1250: v_cvt_pk_f16_f32 v5, 0.5, m0 mul:2 ; encoding: [0x05,0x00,0x6f,0xd7,0xf0,0xfa,0x00,0x08]
+
+0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, exec_hi, null ; encoding: [0x05,0x00,0x6f,0xd7,0x7f,0xf8,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, exec_lo, -1 ; encoding: [0x05,0x00,0x6f,0xd7,0x7e,0x82,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, m0, 0.5 ; encoding: [0x05,0x00,0x6f,0xd7,0x7d,0xe0,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, null, exec_lo ; encoding: [0x05,0x00,0x6f,0xd7,0x7c,0xfc,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, s1, s2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x04,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, s105, s105 ; encoding: [0x05,0x00,0x6f,0xd7,0x69,0xd2,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10
+# GFX1250: v_cvt_pk_f16_f32 v5, src_scc, vcc_lo mul:4 ; encoding: [0x05,0x00,0x6f,0xd7,0xfd,0xd4,0x00,0x10]
+
+0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, ttmp15, src_scc ; encoding: [0x05,0x00,0x6f,0xd7,0x7b,0xfa,0x01,0x00]
+
+0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, v1, v2 ; encoding: [0x05,0x00,0x6f,0xd7,0x01,0x05,0x02,0x00]
+
+0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, v255, v255 ; encoding: [0x05,0x00,0x6f,0xd7,0xff,0xff,0x03,0x00]
+
+0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_pk_f16_f32 v5, vcc_hi, 0xaf123456 ; encoding: [0x05,0x00,0x6f,0xd7,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32 v5, vcc_lo, ttmp15 ; encoding: [0x05,0x00,0x6f,0xd7,0x6a,0xf6,0x00,0x00]
+
+0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v255, -|0xaf123456|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x70,0xd7,0xff,0xd6,0xf0,0x79,0x56,0x34,0x12,0xaf]
+
+0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -1, -|exec_hi|, src_scc ; encoding: [0x05,0x02,0x70,0xd7,0xc1,0xfe,0xf4,0x43]
+
+0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x70,0xd7,0xfd,0xd4,0x04,0x33]
+
+0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -|exec_hi|, null, vcc_lo ; encoding: [0x05,0x01,0x70,0xd7,0x7f,0xf8,0xa8,0x21]
+
+0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, -|ttmp15|, -|src_scc|, ttmp15 ; encoding: [0x05,0x03,0x70,0xd7,0x7b,0xfa,0xed,0x61]
+
+0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x70,0xd7,0xf0,0xfa,0xc0,0x4b]
+
+0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x70,0xd7,0x7d,0xe0,0xf5,0x01]
+
+0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, null, exec_lo, 0xaf123456 ; encoding: [0x05,0x00,0x70,0xd7,0x7c,0xfc,0xfc,0x03,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x70,0xd7,0x01,0xfe,0xff,0x01]
+
+0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x70,0xd7,0x69,0xd2,0xf8,0x01]
+
+0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x70,0xd7,0x01,0x05,0x0e,0x00]
+
+0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x70,0xd7,0xff,0x05,0xa4,0x01]
+
+0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_hi, 0xaf123456, v255 ; encoding: [0x05,0x00,0x70,0xd7,0x6b,0xfe,0xfd,0x07,0x56,0x34,0x12,0xaf]
+
+0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x70,0xd7,0x6a,0xf6,0x0c,0x04]
+
+0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x70,0xd7,0x7e,0x82,0xad,0x01]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x00,0x00]
+
+0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] ; encoding: [0x01,0x08,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_bf8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x75,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, -v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, -v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, 0x1234 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0xff,0x01,0x00,0x34,0x12,0x00,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, s3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x00,0x00]
+
+0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 ; encoding: [0x01,0x00,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:1 ; encoding: [0x01,0x20,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:2 ; encoding: [0x01,0x40,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.l, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 byte_sel:3 ; encoding: [0x01,0x60,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, |v2.l|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, |v2|, v3 ; encoding: [0x01,0x01,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:1 ; encoding: [0x01,0x28,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:2 ; encoding: [0x01,0x48,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, v2, v3 op_sel:[1,0,0] byte_sel:3 ; encoding: [0x01,0x68,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_sr_fp8_f16 v1, |v2.h|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16 v1, |v2|, v3 op_sel:[1,0,0] ; encoding: [0x01,0x09,0x74,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] ; encoding: [0x01,0x40,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x69,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x69,0xd7,0x02,0x06,0x01,0x00]
+
+0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.l, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 clamp ; encoding: [0x01,0x80,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_fp8_f32 v1.h, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32 v1, v2, v3 op_sel:[0,0,1] clamp ; encoding: [0x01,0xc0,0x69,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x07,0x02,0x00]
+
+0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, -v2, |v3| ; encoding: [0x01,0x02,0x6a,0xd7,0x02,0x07,0x02,0x20]
+
+0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00
+# GFX1250-REAL16: v_cvt_pk_bf8_f32 v1.l, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f32 v1, s2, 3 ; encoding: [0x01,0x00,0x6a,0xd7,0x02,0x06,0x01,0x00]
+
+0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6b,0xd7,0x02,0x07,0x02,0x00]
+
+0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00
+# GFX1250: v_cvt_sr_fp8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6b,0xd7,0x02,0x0a,0x02,0x00]
+
+0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20
+# GFX1250: v_cvt_sr_fp8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6b,0xd7,0xff,0x09,0x02,0x20]
+
+0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00
+# GFX1250: v_cvt_sr_bf8_f32 v1, v2, v3 ; encoding: [0x01,0x00,0x6c,0xd7,0x02,0x07,0x02,0x00]
+
+0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00
+# GFX1250: v_cvt_sr_bf8_f32 v10, s2, v5 ; encoding: [0x0a,0x00,0x6c,0xd7,0x02,0x0a,0x02,0x00]
+
+0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20
+# GFX1250: v_cvt_sr_bf8_f32 v5, -|v255|, v4 ; encoding: [0x05,0x01,0x6c,0xd7,0xff,0x09,0x02,0x20]
+
+0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_bf8 v[10:13], v[20:21], v8 scale_sel:1 ; encoding: [0x0a,0x08,0xac,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_bf8 v[10:13], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xab,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 ; encoding: [0x0a,0x00,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp8 v[10:13], v[20:21], v8 scale_sel:5 ; encoding: [0x0a,0x28,0xa8,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp8 v[10:13], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xa9,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_bf16_fp4 v[10:13], v20, v8 scale_sel:2 ; encoding: [0x0a,0x10,0xa0,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, 0xcf00 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 ; encoding: [0x0a,0x00,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f16_fp4 v[10:13], v20, v8 scale_sel:3 ; encoding: [0x0a,0x18,0x9f,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_bf8 v[10:17], v[20:21], v8 scale_sel:7 ; encoding: [0x0a,0x38,0xad,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], 0xcf00 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 ; encoding: [0x0a,0x00,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp8 v[10:17], v[20:21], v8 scale_sel:6 ; encoding: [0x0a,0x30,0xaa,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, 0xcf00 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0xff,0x01,0x00,0x00,0xcf,0x00,0x00]
+
+0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 ; encoding: [0x0a,0x00,0xa1,0xd6,0x14,0x11,0x02,0x00]
+
+0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00
+# GFX1250: v_cvt_scale_pk8_f32_fp4 v[10:17], v20, v8 scale_sel:1 ; encoding: [0x0a,0x08,0xa1,0xd6,0x14,0x11,0x02,0x00]
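
Editorial note on the trailing bytes in the encodings above: a 32-bit literal operand such as 0xaf123456 or 0xcf00 is appended to the VOP3 encoding in little-endian byte order, which is why it appears as 0x56,0x34,0x12,0xaf (or 0x00,0xcf,0x00,0x00). A minimal Python sketch, assuming only standard byte ordering; the helper name is illustrative and not an LLVM API:

import struct

def literal_bytes(value: int) -> list[str]:
    # Pack the 32-bit literal as an unsigned little-endian word, the
    # same order the encodings above list their trailing bytes in.
    return [f"0x{b:02x}" for b in struct.pack("<I", value)]

assert literal_bytes(0xAF123456) == ["0x56", "0x34", "0x12", "0xaf"]
assert literal_bytes(0x0000CF00) == ["0x00", "0xcf", "0x00", "0x00"]
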
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
index f8d9afe..5fa7bc8 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp16.txt
@@ -132,6 +132,42 @@
0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff
# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo bitop3:0x3f row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x07,0x34,0xd6,0xfa,0x04,0xaa,0xe1,0x01,0x11,0x01,0xff]
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_min_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x60,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_max_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5e,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_min_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x61,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x1b,0x00,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_add_max_u32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x5f,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6d,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
@@ -173,3 +209,216 @@
0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6d,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v1.l, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x82,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v1.h, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v1, -v2, |v3| op_sel:[0,0,1] clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0xc2,0x69,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed
+# GFX1250: v_cvt_sr_fp8_f32_e64_dpp v1, -v2, v3 clamp quad_perm:[3,2,1,0] row_mask:0xe bank_mask:0xd ; encoding: [0x01,0x80,0x6b,0xd7,0xfa,0x06,0x02,0x20,0x02,0x1b,0x00,0xed]
+
+0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x6e,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+
+0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6e,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x6e,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+
+0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6e,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6e,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x90,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 op_sel:[0,0,0,1] row_share:0 row_mask:0x5 bank_mask:0x3 ; encoding: [0x02,0x40,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0x53]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, 1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x06,0x02,0x04,0x50,0x01,0xff]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 quad_perm:[1,2,3,1] row_mask:0xf bank_mask:0xf ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x79,0x00,0xff]
+
+0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v2, v4, v7, v8 row_share:3 row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x02,0x00,0x91,0xd6,0xfa,0x0e,0x22,0x04,0x04,0x53,0x05,0xff]
+
+0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+
+0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+
+0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 quad_perm:[1,2,3,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x00,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x39,0x00,0xff]
+
+0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] row_share:0 row_mask:0x5 bank_mask:0x3 fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xfa,0x00,0x00,0x00,0x02,0x50,0x05,0x53]
+
+0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0x6f,0xd7,0xfa,0xfe,0x03,0x38,0xff,0x6f,0x05,0x30]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x08,0x01,0x5f,0x01,0x01]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x10,0x01,0x60,0x09,0x13]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x2f,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x50,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x11,0x01,0xff]
+
+0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6f,0xd7,0xfa,0x04,0x02,0x00,0x01,0x1f,0x01,0xff]
+
+0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x83,0x70,0xd7,0xfa,0xfe,0xf7,0x7b,0xff,0x6f,0x05,0x30]
+
+0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x70,0xd7,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x01,0x70,0xd7,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0x16,0x52,0x01,0x60,0x09,0x13]
+
+0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x70,0xd7,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x70,0xd7,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x20,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x40,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x60,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+
+0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x04,0xff]
+
+0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x68,0x74,0xd7,0xfa,0x06,0x02,0x00,0x02,0xe4,0x00,0xff]
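
Editorial note: the quad_perm control byte in the DPP16 encodings above can be decoded by hand — each of the four lane selectors takes two bits, with selector 0 in the low bits, so quad_perm:[3,2,1,0] encodes as 0x1b and quad_perm:[0,1,2,3] as 0xe4. A minimal sketch inferred from the test data (an assumption for illustration, not an LLVM API):

def quad_perm_ctrl(sel: list[int]) -> int:
    # Pack four 2-bit lane selectors, selector 0 in the low bits.
    ctrl = 0
    for i, lane in enumerate(sel):
        ctrl |= (lane & 0x3) << (2 * i)
    return ctrl

assert quad_perm_ctrl([3, 2, 1, 0]) == 0x1B
assert quad_perm_ctrl([0, 1, 2, 3]) == 0xE4
assert quad_perm_ctrl([1, 2, 3, 1]) == 0x79
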
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
index 44726a1..faeff45 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_dpp8.txt
@@ -110,6 +110,30 @@
0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
# GFX1250: v_bitop3_b32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x34,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x60,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_i32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5e,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x61,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_min_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_u32_e64_dpp v5, v1, 42, s3 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x5f,0xd6,0xea,0x54,0x0d,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_add_max_u32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6d,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
@@ -121,3 +145,162 @@
0x05,0x00,0x6d,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05
# GFX1250: v_cvt_pk_bf16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6d,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v5.l, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250-REAL16: v_cvt_pk_fp8_f32_e64_dpp v5.h, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f32_e64_dpp v5, v1, v2 op_sel:[0,0,1] clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0xc0,0x69,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21
+# GFX1250: v_cvt_sr_fp8_f32_e64_dpp v5, v1, v2 clamp dpp8:[7,6,5,4,2,3,0,1] ; encoding: [0x05,0x80,0x6b,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0xa9,0x21]
+
+0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x6e,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+
+0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6e,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6e,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x6e,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6e,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_bf16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6e,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x90,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_i8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x90,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, s3 op_sel:[0,0,0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x40,0x91,0xd6,0xea,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_ashr_pk_u8_i32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x91,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x73,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_bf8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_bf8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x73,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x00,0x72,0xd7,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05
+# GFX1250-REAL16: v_cvt_pk_fp8_f16_e64_dpp v1.h, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+# GFX1250-FAKE16: v_cvt_pk_fp8_f16_e64_dpp v1, v2 op_sel:[0,1] dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x40,0x72,0xd7,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+
+0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v255, -|v255|, v255 clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0x6f,0xd7,0xe9,0xfe,0x03,0x38,0xff,0x00,0x00,0x00]
+
+0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x00,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6f,0xd7,0xe9,0x04,0x02,0x08,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_pk_f16_f32_e64_dpp v5, v1, v2 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0x6f,0xd7,0xea,0x04,0x02,0x10,0x01,0x77,0x39,0x05]
+
+0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v255, -|v255|, -|v255|, src_scc clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x83,0x70,0xd7,0xe9,0xfe,0xf7,0x7b,0xff,0x00,0x00,0x00]
+
+0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x70,0xd7,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, -|v1|, v2, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x70,0xd7,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, 5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x02,0x70,0xd7,0xea,0x04,0x16,0x52,0x01,0x77,0x39,0x05]
+
+0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x70,0xd7,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
+# GFX1250: v_cvt_sr_pk_f16_f32_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x70,0xd7,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x75,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_bf8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_bf8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x75,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x00,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] dpp8:[1,2,3,4,5,6,7,0] fi:1 ; encoding: [0x01,0x08,0x74,0xd7,0xea,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:1 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x20,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:2 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x40,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.l, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x60,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+
+0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f
+# GFX1250-REAL16: v_cvt_sr_fp8_f16_e64_dpp v1, v2.h, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
+# GFX1250-FAKE16: v_cvt_sr_fp8_f16_e64_dpp v1, v2, v3 op_sel:[1,0,0] byte_sel:3 dpp8:[1,2,3,4,5,6,7,0] ; encoding: [0x01,0x68,0x74,0xd7,0xe9,0x06,0x02,0x00,0x02,0xd1,0x58,0x1f]
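
Editorial note: the dpp8 selector in these encodings packs eight 3-bit lane indices into 24 bits, emitted as the three trailing bytes in little-endian order; dpp8:[7,6,5,4,3,2,1,0] therefore ends every encoding with 0x77,0x39,0x05. A minimal sketch inferred from the test data (illustrative only, not an LLVM API):

def dpp8_bytes(lanes: list[int]) -> list[str]:
    # Pack eight 3-bit lane selectors, lane 0 in the low bits, then
    # emit the 24-bit value as three little-endian bytes.
    value = 0
    for i, lane in enumerate(lanes):
        value |= (lane & 0x7) << (3 * i)
    return [f"0x{(value >> (8 * i)) & 0xff:02x}" for i in range(3)]

assert dpp8_bytes([7, 6, 5, 4, 3, 2, 1, 0]) == ["0x77", "0x39", "0x05"]
assert dpp8_bytes([1, 2, 3, 4, 5, 6, 7, 0]) == ["0xd1", "0x58", "0x1f"]
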
diff --git a/llvm/test/MC/ELF/many-instructions.s b/llvm/test/MC/ELF/many-instructions.s
deleted file mode 100644
index cbdb2a7..0000000
--- a/llvm/test/MC/ELF/many-instructions.s
+++ /dev/null
@@ -1,10 +0,0 @@
-# REQUIRES: asserts
-# RUN: llvm-mc -filetype=obj -triple=x86_64 %s -o /dev/null -debug-only=mc-dump
-
-## Test that encodeInstruction may cause a new fragment to be created.
-# CHECK: 0 Data Size:16200
-# CHECK: 16200 Data Size:180
-
-.rept 16384/10
-movabsq $foo, %rax
-.endr
diff --git a/llvm/test/MC/X86/verify-callgraph-section.s b/llvm/test/MC/X86/verify-callgraph-section.s
new file mode 100644
index 0000000..ce07228
--- /dev/null
+++ b/llvm/test/MC/X86/verify-callgraph-section.s
@@ -0,0 +1,58 @@
+/// Test that the .callgraph section associates each indirect callsite
+/// (annotated by a generated temporary label .Ltmp*) with the
+/// corresponding callee type identifier.
+
+// RUN: llvm-mc -triple=x86_64 -filetype=obj -o - < %s | llvm-readelf -x .callgraph - | FileCheck %s
+
+ .text
+ .globl ball # -- Begin function ball
+ .p2align 4
+ .type ball,@function
+ball: # @ball
+.Lfunc_begin0:
+# %bb.0: # %entry
+ pushq %rbx
+ subq $32, %rsp
+ movl $0, 4(%rsp)
+ movq foo@GOTPCREL(%rip), %rcx
+ movq %rcx, 24(%rsp)
+ xorl %eax, %eax
+ callq *%rcx
+.Ltmp0:
+ movq bar@GOTPCREL(%rip), %rax
+ movq %rax, 16(%rsp)
+ movsbl 3(%rsp), %edi
+ callq *%rax
+.Ltmp1:
+ movq baz@GOTPCREL(%rip), %rax
+ movq %rax, 8(%rsp)
+ leaq 3(%rsp), %rbx
+ movq %rbx, %rdi
+ callq *%rax
+.Ltmp2:
+ callq foo@PLT
+ movsbl 3(%rsp), %edi
+ callq bar@PLT
+ movq %rbx, %rdi
+ callq baz@PLT
+ addq $32, %rsp
+ popq %rbx
+ retq
+ .section .callgraph,"o",@progbits,.text
+ .quad 0
+ .quad .Lfunc_begin0
+ .quad 1
+ .quad 3
+ /// MD5 hash of the callee type ID for foo.
+ // CHECK: 2444f731 f5eecb3e
+ .quad 0x3ecbeef531f74424
+ .quad .Ltmp0
+ /// MD5 hash of the callee type ID for bar.
+ // CHECK: 5486bc59 814b8e30
+ .quad 0x308e4b8159bc8654
+ .quad .Ltmp1
+ /// MD5 hash of the callee type ID for baz.
+ // CHECK: 7ade6814 f897fd77
+ .quad 0x77fd97f81468de7a
+ .quad .Ltmp2
+ .text
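
For context, a minimal C sketch of the code shape this assembly encodes (reconstructed from the body of @ball; the signatures are an assumption, not part of the patch): each call through a function pointer gets a .Ltmp label and a callee-type-id entry in .callgraph, while the direct calls at the end get none.

extern void foo(void);
extern void bar(char);
extern void baz(char *);

void ball(void) {
  char c = 0;
  void (*pf)(void) = foo;   /* foo@GOTPCREL load, spilled to 24(%rsp) */
  void (*pb)(char) = bar;
  void (*pz)(char *) = baz;
  pf();    /* indirect call site -> .Ltmp0, type id hash for foo */
  pb(c);   /* indirect call site -> .Ltmp1, type id hash for bar */
  pz(&c);  /* indirect call site -> .Ltmp2, type id hash for baz */
  foo();   /* direct calls: no per-site .callgraph entry */
  bar(c);
  baz(&c);
}
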
diff --git a/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll b/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
index da756e7..9eb9bda 100644
--- a/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
+++ b/llvm/test/ThinLTO/AArch64/cgdata-merge-read.ll
@@ -30,6 +30,20 @@
; RUN: llvm-objdump -d %tout-read.1 | FileCheck %s --check-prefix=THUNK1
; RUN: llvm-objdump -d %tout-read.2 | FileCheck %s --check-prefix=THUNK2
+; The result is the same when -indexed-codegen-data-read-function-map-names=false is used.
+; RUN: llvm-lto2 run -enable-global-merge-func=true \
+; RUN: -indexed-codegen-data-read-function-map-names=false \
+; RUN: -codegen-data-use-path=%tout.cgdata \
+; RUN: %t-foo.bc %t-goo.bc -o %tout-read \
+; RUN: -r %t-foo.bc,_f1,px \
+; RUN: -r %t-goo.bc,_f2,px \
+; RUN: -r %t-foo.bc,_g,l -r %t-foo.bc,_g1,l -r %t-foo.bc,_g2,l \
+; RUN: -r %t-goo.bc,_g,l -r %t-goo.bc,_g1,l -r %t-goo.bc,_g2,l
+; RUN: llvm-nm %tout-read.1 | FileCheck %s --check-prefix=READ1
+; RUN: llvm-nm %tout-read.2 | FileCheck %s --check-prefix=READ2
+; RUN: llvm-objdump -d %tout-read.1 | FileCheck %s --check-prefix=THUNK1
+; RUN: llvm-objdump -d %tout-read.2 | FileCheck %s --check-prefix=THUNK2
+
; READ1: _f1.Tgm
; READ2: _f2.Tgm
diff --git a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
index 4d57199..bb3001e 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
@@ -190,6 +190,39 @@ return: ; preds = %entry, %if.end
ret i32 %retval.0
}
+define i32 @ctz3_with_i8gep(i32 %x) {
+; CHECK-LABEL: @ctz3_with_i8gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP]], label [[RETURN:%.*]], label [[IF_END:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ [[TMP2]], [[IF_END]] ], [ 32, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RETVAL_0]]
+;
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %return, label %if.end
+
+if.end: ; preds = %entry
+ %sub = sub i32 0, %x
+ %and = and i32 %x, %sub
+ %mul = mul i32 %and, 81224991
+ %0 = lshr i32 %mul, 25
+ %1 = and i32 %0, 124
+ %arrayidx.idx = zext nneg i32 %1 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @ctz3.table, i64 %arrayidx.idx
+ %2 = load i32, ptr %arrayidx, align 4
+ br label %return
+
+return: ; preds = %if.end, %entry
+ %retval.0 = phi i32 [ %2, %if.end ], [ 32, %entry ]
+ ret i32 %retval.0
+}
+
+
@table = internal unnamed_addr constant [64 x i32] [i32 0, i32 1, i32 12, i32 2, i32 13, i32 22, i32 17, i32 3, i32 14, i32 33, i32 23, i32 36, i32 18, i32 58, i32 28, i32 4, i32 62, i32 15, i32 34, i32 26, i32 24, i32 48, i32 50, i32 37, i32 19, i32 55, i32 59, i32 52, i32 29, i32 44, i32 39, i32 5, i32 63, i32 11, i32 21, i32 16, i32 32, i32 35, i32 57, i32 27, i32 61, i32 25, i32 47, i32 49, i32 54, i32 51, i32 43, i32 38, i32 10, i32 20, i32 31, i32 56, i32 60, i32 46, i32 53, i32 42, i32 9, i32 30, i32 45, i32 41, i32 8, i32 40, i32 7, i32 6], align 4
define i32 @ctz4(i64 %b) {
@@ -276,3 +309,192 @@ entry:
%0 = load i32, ptr %arrayidx, align 4
ret i32 %0
}
+
+;; This has a wrong table size but is otherwise fine.
+@ctz9.table = internal unnamed_addr constant [128 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
+define i32 @ctz9(i32 %x) {
+; CHECK-LABEL: @ctz9(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [128 x i8], ptr @ctz9.table, i64 0, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
+
+define i32 @ctz1_with_i8_gep(i32 %x) {
+; CHECK-LABEL: @ctz1_with_i8_gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds i8, ptr @ctz7.table, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
+
+; This is the same as ctz2 (an i16 table) but with an i8 gep, making the indices invalid
+define i32 @ctz2_with_i8_gep(i32 %x) {
+; CHECK-LABEL: @ctz2_with_i8_gep(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[X]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[AND]], 72416175
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[MUL]], 26
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[SHR]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i8], ptr @ctz2.table, i64 0, i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 72416175
+ %shr = lshr i32 %mul, 26
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [64 x i8], ptr @ctz2.table, i64 0, i64 %idxprom
+ %0 = load i16, ptr %arrayidx, align 1
+ %conv = sext i16 %0 to i32
+ ret i32 %conv
+}
+
+; This is the same as ctz2_with_i8_gep but with the gep index multiplied by 2.
+define i32 @ctz2_with_i8_gep_fixed(i32 %x) {
+; CHECK-LABEL: @ctz2_with_i8_gep_fixed(
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false)
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+ %sub = sub i32 0, %x
+ %and = and i32 %x, %sub
+ %mul = mul i32 %and, 72416175
+ %shr = lshr i32 %mul, 25
+ %shr2 = and i32 %shr, 126
+ %1 = zext nneg i32 %shr2 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @ctz2.table, i64 %1
+ %2 = load i16, ptr %arrayidx, align 2
+ %conv = sext i16 %2 to i32
+ ret i32 %conv
+}
+
+; This is an i16 input with the de Bruijn table stored in a single i128.
+@tablei128 = internal unnamed_addr constant i128 16018378897745984667142067713738932480, align 16
+define i32 @cttz_i16_via_i128(i16 noundef %x) {
+; CHECK-LABEL: @cttz_i16_via_i128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i16 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP3]], i16 0, i16 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT: ret i32 [[CONV6]]
+;
+entry:
+ %sub = sub i16 0, %x
+ %and = and i16 %x, %sub
+ %mul = mul i16 %and, 2479
+ %0 = lshr i16 %mul, 12
+ %idxprom = zext nneg i16 %0 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @tablei128, i64 %idxprom
+ %1 = load i8, ptr %arrayidx, align 1
+ %conv6 = zext i8 %1 to i32
+ ret i32 %conv6
+}
+
+; Same as above, but one table entry is wrong, so the fold must not fire
+@tablei128b = internal unnamed_addr constant i128 16018378897745984667142068813250560256, align 16
+define i32 @cttz_i16_via_i128_incorrecttable(i16 noundef %x) {
+; CHECK-LABEL: @cttz_i16_via_i128_incorrecttable(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SUB:%.*]] = sub i16 0, [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i16 [[X]], [[SUB]]
+; CHECK-NEXT: [[MUL:%.*]] = mul i16 [[AND]], 2479
+; CHECK-NEXT: [[TMP0:%.*]] = lshr i16 [[MUL]], 12
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext nneg i16 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr @tablei128b, i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV6:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: ret i32 [[CONV6]]
+;
+entry:
+ %sub = sub i16 0, %x
+ %and = and i16 %x, %sub
+ %mul = mul i16 %and, 2479
+ %0 = lshr i16 %mul, 12
+ %idxprom = zext nneg i16 %0 to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @tablei128b, i64 %idxprom
+ %1 = load i8, ptr %arrayidx, align 1
+ %conv6 = zext i8 %1 to i32
+ ret i32 %conv6
+}
+
+; Same as ctz1, but the table elements and the load are very large (i128)
+@ctz7i128.table = internal unnamed_addr constant [32 x i128] [i128 0, i128 1, i128 28, i128 2, i128 29, i128 14, i128 24, i128 3, i128 30, i128 22, i128 20, i128 15, i128 25, i128 17, i128 4, i128 8, i128 31, i128 27, i128 13, i128 23, i128 21, i128 19, i128 16, i128 7, i128 26, i128 12, i128 18, i128 6, i128 11, i128 5, i128 10, i128 9], align 16
+define i128 @ctz1_i128(i32 %x) {
+; CHECK-LABEL: @ctz1_i128(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i128
+; CHECK-NEXT: ret i128 [[TMP3]]
+;
+entry:
+ %sub = sub i32 0, %x
+ %and = and i32 %sub, %x
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds [32 x i128], ptr @ctz7i128.table, i64 0, i64 %idxprom
+ %l = load i128, ptr %arrayidx, align 1
+ ret i128 %l
+}
+
+; This is roughly the same as ctz1 but using i128.
+@table.i128 = internal unnamed_addr constant [128 x i8] c"\00\01e\02tf<\03|ug^R=!\04}yvWoh_5ZSE>0\22\14\05~rzPwmX.pkiI`K6\1Ab[TBMF?'81*#\1C\15\0E\06\7Fds;{]Q xVn4YD/\13qOl-jHJ\19aAL&7)\1B\0Dc:\\\1FU3C\12N,G\18@%(\0C9\1E2\11+\17$\0B\1D\10\16\0A\0F\09\08\07", align 1
+define i32 @src(i128 noundef %x) {
+; CHECK-LABEL: @src(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP3:%.*]] = call i128 @llvm.cttz.i128(i128 [[X:%.*]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i128 [[X]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i128 0, i128 [[TMP3]]
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i128 [[TMP2]] to i8
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %sub = sub i128 0, %x
+ %and = and i128 %x, %sub
+ %mul = mul i128 %and, 2647824804797170443043024478319300753
+ %shr = lshr i128 %mul, 121
+ %idxprom = trunc i128 %shr to i64
+ %arrayidx = getelementptr inbounds nuw i8, ptr @table.i128, i64 %idxprom
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ ret i32 %conv
+}
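
All of these functions exercise the classic De Bruijn multiply-and-lookup idiom for counting trailing zeros, which AggressiveInstCombine replaces with @llvm.cttz when the table matches. A self-contained sketch of the 32-bit pattern (the multiplier 125613361 is 0x077CB531 and the table is the one used by ctz1_i128 above; the zero guard is left out, as in ctz1):

#include <assert.h>
#include <stdint.h>

/* De Bruijn table matching the multiplier 0x077CB531 (125613361). */
static const int table[32] = {
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4,  8,
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9};

/* Undefined for x == 0, like the cttz with the i1 true flag above. */
static int cttz32(uint32_t x) {
  return table[((x & -x) * 0x077CB531u) >> 27];
}

int main(void) {
  for (int i = 0; i < 32; ++i)
    assert(cttz32(UINT32_C(1) << i) == i);
  return 0;
}
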
diff --git a/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll b/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
index 714acd7..90836db8 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/negative-lower-table-based-cttz.ll
@@ -66,7 +66,7 @@ entry:
;; This is a negative test with a wrong table size and constants.
-@ctz3.table = internal unnamed_addr constant [128 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
+@ctz3.table = internal unnamed_addr constant [128 x i8] c"\01\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
define i32 @ctz5(i32 %x) {
entry:
diff --git a/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll b/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
index 48b92e9..640c910 100644
--- a/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
+++ b/llvm/test/Transforms/IndVarSimplify/AArch64/fold-ext-add.ll
@@ -10,21 +10,21 @@ define void @pred_mip_12(ptr %dst, ptr %src, i32 %n, i64 %offset) {
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i32 [[N:%.*]], i64 [[OFFSET:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N]], i32 1)
+; CHECK-NEXT: [[TMP0:%.*]] = zext nneg i32 [[SMAX]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[OFFSET]], [[TMP0]]
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]]
; CHECK-NEXT: br label %[[OUTER_LOOP:.*]]
; CHECK: [[OUTER_LOOP_LOOPEXIT:.*]]:
-; CHECK-NEXT: [[PTR_IV_NEXT_LCSSA:%.*]] = phi ptr [ [[PTR_IV_NEXT:%.*]], %[[INNER_LOOP:.*]] ]
; CHECK-NEXT: br label %[[OUTER_LOOP]]
; CHECK: [[OUTER_LOOP]]:
-; CHECK-NEXT: [[OUTER_PTR:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[PTR_IV_NEXT_LCSSA]], %[[OUTER_LOOP_LOOPEXIT]] ]
+; CHECK-NEXT: [[OUTER_PTR:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[SCEVGEP]], %[[OUTER_LOOP_LOOPEXIT]] ]
; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
; CHECK-NEXT: br i1 [[C]], label %[[INNER_LOOP_PREHEADER:.*]], label %[[EXIT:.*]]
; CHECK: [[INNER_LOOP_PREHEADER]]:
-; CHECK-NEXT: br label %[[INNER_LOOP]]
+; CHECK-NEXT: br label %[[INNER_LOOP:.*]]
; CHECK: [[INNER_LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[INNER_LOOP]] ], [ 0, %[[INNER_LOOP_PREHEADER]] ]
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[PTR_IV_NEXT]], %[[INNER_LOOP]] ], [ [[SRC]], %[[INNER_LOOP_PREHEADER]] ]
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[OUTER_PTR]], align 1
-; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 [[OFFSET]]
; CHECK-NEXT: store i8 [[L]], ptr [[DST]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[SMAX]]
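
The new preheader code is the closed form of the deleted pointer IV: stepping a pointer by offset bytes smax(n, 1) times ends at src + offset * smax(n, 1), which is exactly what the SCEVGEP computes once. A tiny sketch of that algebra (illustrative values only):

#include <assert.h>
#include <stdint.h>

int main(void) {
  uintptr_t src = 0x1000;         /* illustrative base address */
  int64_t offset = 24, iters = 7; /* iters plays the role of smax(n, 1) */
  uintptr_t p = src;
  for (int64_t i = 0; i < iters; ++i)
    p += offset;                  /* the per-iteration ptr.iv.next gep */
  assert(p == src + (uintptr_t)(offset * iters)); /* the hoisted SCEVGEP */
  return 0;
}
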
diff --git a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
index d24f9a4..17921af 100644
--- a/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
+++ b/llvm/test/Transforms/IndVarSimplify/zext-nuw.ll
@@ -15,11 +15,9 @@ define void @_Z3fn1v() {
; CHECK-NEXT: [[J_SROA_0_0_COPYLOAD:%.*]] = load i8, ptr [[X5]], align 1
; CHECK-NEXT: br label [[DOTPREHEADER4_LR_PH:%.*]]
; CHECK: .preheader4.lr.ph:
-; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[X4]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
; CHECK-NEXT: [[TMP4:%.*]] = sext i8 [[J_SROA_0_0_COPYLOAD]] to i64
-; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[X4]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], [[TMP2]]
; CHECK-NEXT: br label [[DOTPREHEADER4:%.*]]
; CHECK: .preheader4:
; CHECK-NEXT: [[K_09:%.*]] = phi ptr [ undef, [[DOTPREHEADER4_LR_PH]] ], [ [[X25:%.*]], [[X22:%.*]] ]
diff --git a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll
index b57a45f..8a608a1 100644
--- a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll
+++ b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs-low-threshold.ll
@@ -7,10 +7,10 @@ target triple = "aarch64"
declare void @streaming_compatible_f() #0 "aarch64_pstate_sm_compatible"
-; Function @streaming_callee doesn't contain any operations that may use ZA
+; Function @non_streaming_callee doesn't contain any operations that may use ZA
; state and therefore can be legally inlined into a normal function.
-define void @streaming_callee() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_callee
+define void @non_streaming_callee() #0 {
+; CHECK-LABEL: define void @non_streaming_callee
; CHECK-SAME: () #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: call void @streaming_compatible_f()
; CHECK-NEXT: call void @streaming_compatible_f()
@@ -21,26 +21,26 @@ define void @streaming_callee() #0 "aarch64_pstate_sm_enabled" {
ret void
}
-; Inline call to @streaming_callee to remove a streaming mode change.
-define void @non_streaming_caller_inline() #0 {
-; CHECK-LABEL: define void @non_streaming_caller_inline
+; Inline call to @non_streaming_callee to remove a streaming mode change.
+define void @streaming_caller_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_inline
; CHECK-SAME: () #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: call void @streaming_compatible_f()
; CHECK-NEXT: call void @streaming_compatible_f()
; CHECK-NEXT: ret void
;
- call void @streaming_callee()
+ call void @non_streaming_callee()
ret void
}
-; Don't inline call to @streaming_callee when the inline-threshold is set to 1, because it does not eliminate a streaming-mode change.
-define void @streaming_caller_dont_inline() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_dont_inline
+; Don't inline call to @non_streaming_callee when the inline-threshold is set to 1, because it does not eliminate a streaming-mode change.
+define void @non_streaming_caller_dont_inline() #0 {
+; CHECK-LABEL: define void @non_streaming_caller_dont_inline
; CHECK-SAME: () #[[ATTR1]] {
-; CHECK-NEXT: call void @streaming_callee()
+; CHECK-NEXT: call void @non_streaming_callee()
; CHECK-NEXT: ret void
;
- call void @streaming_callee()
+ call void @non_streaming_callee()
ret void
}
diff --git a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll
index 6cb1692..077a3aa 100644
--- a/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll
+++ b/llvm/test/Transforms/Inline/AArch64/sme-pstatesm-attrs.ll
@@ -86,7 +86,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_normal_callee_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_normal_callee_inline
-; CHECK-SAME: () #[[ATTR6:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -103,7 +103,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_streaming_callee_dont_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -120,7 +120,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_streaming_compatible_callee_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -137,7 +137,7 @@ entry:
; [ ] N -> SC + B
define i32 @normal_caller_locally_streaming_callee_dont_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -154,7 +154,7 @@ entry:
; [x] N -> SC + B
define i32 @normal_caller_streaming_compatible_locally_streaming_callee_dont_inline() #0 {
; CHECK-LABEL: define i32 @normal_caller_streaming_compatible_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_compatible_locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -171,7 +171,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR7:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -188,7 +188,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_streaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -205,7 +205,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -222,7 +222,7 @@ entry:
; [ ] S -> SC + B
define i32 @streaming_caller_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -239,7 +239,7 @@ entry:
; [x] S -> SC + B
define i32 @streaming_caller_streaming_compatible_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i32 @streaming_caller_streaming_compatible_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -256,7 +256,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR8:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -273,7 +273,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_streaming_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -290,7 +290,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -307,7 +307,7 @@ entry:
; [ ] N + B -> SC + B
define i32 @locally_streaming_caller_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -324,7 +324,7 @@ entry:
; [x] N + B -> SC + B
define i32 @locally_streaming_caller_streaming_compatible_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @locally_streaming_caller_streaming_compatible_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR8]] {
+; CHECK-SAME: () #[[ATTR3]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -341,7 +341,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -358,7 +358,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_streaming_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -375,7 +375,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -392,7 +392,7 @@ entry:
; [ ] SC -> SC + B
define i32 @streaming_compatible_caller_locally_streaming_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -409,7 +409,7 @@ entry:
; [x] SC -> SC + B
define i32 @streaming_compatible_caller_streaming_compatible_locally_streaming_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" {
; CHECK-LABEL: define i32 @streaming_compatible_caller_streaming_compatible_locally_streaming_callee_dont_inline
-; CHECK-SAME: () #[[ATTR9]] {
+; CHECK-SAME: () #[[ATTR4]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @streaming_compatible_locally_streaming_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -425,7 +425,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_normal_callee_dont_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_normal_callee_dont_inline
-; CHECK-SAME: () #[[ATTR10:[0-9]+]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i32 @normal_callee()
; CHECK-NEXT: ret i32 [[RES]]
@@ -442,7 +442,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_streaming_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -459,7 +459,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_streaming_compatible_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -476,7 +476,7 @@ entry:
; [ ] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_locally_streaming_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_locally_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -493,7 +493,7 @@ entry:
; [x] SC + B -> SC + B
define i32 @streaming_compatible_locally_streaming_caller_and_callee_inline() #0 "aarch64_pstate_sm_compatible" "aarch64_pstate_sm_body" {
; CHECK-LABEL: define i32 @streaming_compatible_locally_streaming_caller_and_callee_inline
-; CHECK-SAME: () #[[ATTR10]] {
+; CHECK-SAME: () #[[ATTR5]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES_I:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: ret i32 [[RES_I]]
@@ -505,7 +505,7 @@ entry:
define void @normal_callee_with_inlineasm() #0 {
; CHECK-LABEL: define void @normal_callee_with_inlineasm
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void asm sideeffect "
; CHECK-NEXT: ret void
@@ -517,7 +517,7 @@ entry:
define void @streaming_caller_normal_callee_with_inlineasm_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define void @streaming_caller_normal_callee_with_inlineasm_dont_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @normal_callee_with_inlineasm()
; CHECK-NEXT: ret void
@@ -529,7 +529,7 @@ entry:
define i64 @normal_callee_with_intrinsic_call() #0 {
; CHECK-LABEL: define i64 @normal_callee_with_intrinsic_call
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.aarch64.sve.cntb(i32 4)
; CHECK-NEXT: ret i64 [[RES]]
@@ -541,7 +541,7 @@ entry:
define i64 @streaming_caller_normal_callee_with_intrinsic_call_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i64 @streaming_caller_normal_callee_with_intrinsic_call_dont_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i64 @normal_callee_with_intrinsic_call()
; CHECK-NEXT: ret i64 [[RES]]
@@ -555,7 +555,7 @@ declare i64 @llvm.aarch64.sve.cntb(i32)
define i64 @normal_callee_call_sme_state() #0 {
; CHECK-LABEL: define i64 @normal_callee_call_sme_state
-; CHECK-SAME: () #[[ATTR6]] {
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call { i64, i64 } @__arm_sme_state()
; CHECK-NEXT: [[RES_0:%.*]] = extractvalue { i64, i64 } [[RES]], 0
@@ -571,7 +571,7 @@ declare {i64, i64} @__arm_sme_state()
define i64 @streaming_caller_normal_callee_call_sme_state_dont_inline() #0 "aarch64_pstate_sm_enabled" {
; CHECK-LABEL: define i64 @streaming_caller_normal_callee_call_sme_state_dont_inline
-; CHECK-SAME: () #[[ATTR7]] {
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[RES:%.*]] = call i64 @normal_callee_call_sme_state()
; CHECK-NEXT: ret i64 [[RES]]
@@ -583,57 +583,57 @@ entry:
-declare void @streaming_body() "aarch64_pstate_sm_enabled"
+declare void @nonstreaming_body()
-define void @streaming_caller_single_streaming_callee() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_callee
-; CHECK-SAME: () #[[ATTR7]] {
-; CHECK-NEXT: call void @streaming_body()
+define void @nonstreaming_caller_single_nonstreaming_callee() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_single_nonstreaming_callee
+; CHECK-SAME: () #[[ATTR1]] {
+; CHECK-NEXT: call void @nonstreaming_body()
; CHECK-NEXT: ret void
;
- call void @streaming_body()
+ call void @nonstreaming_body()
ret void
}
-define void @streaming_caller_multiple_streaming_callees() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_callees
-; CHECK-SAME: () #[[ATTR7]] {
-; CHECK-NEXT: call void @streaming_body()
-; CHECK-NEXT: call void @streaming_body()
+define void @nonstreaming_caller_multiple_nonstreaming_callees() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_multiple_nonstreaming_callees
+; CHECK-SAME: () #[[ATTR1]] {
+; CHECK-NEXT: call void @nonstreaming_body()
+; CHECK-NEXT: call void @nonstreaming_body()
; CHECK-NEXT: ret void
;
- call void @streaming_body()
- call void @streaming_body()
+ call void @nonstreaming_body()
+ call void @nonstreaming_body()
ret void
}
; Allow inlining, as inlining it would not increase the number of streaming-mode changes.
-define void @streaming_caller_single_streaming_callee_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_callee_inline
-; CHECK-SAME: () #[[ATTR6]] {
-; CHECK-NEXT: call void @streaming_body()
+define void @streaming_caller_to_nonstreaming_callee_with_single_nonstreaming_callee_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_single_nonstreaming_callee_inline
+; CHECK-SAME: () #[[ATTR2]] {
+; CHECK-NEXT: call void @nonstreaming_body()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_single_streaming_callee()
+ call void @nonstreaming_caller_single_nonstreaming_callee()
ret void
}
-; Prevent inlining, as inline it would lead to multiple streaming-mode changes.
-define void @streaming_caller_multiple_streaming_callees_dont_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_callees_dont_inline
-; CHECK-SAME: () #[[ATTR6]] {
-; CHECK-NEXT: call void @streaming_caller_multiple_streaming_callees()
+; Prevent inlining, as inlining it would lead to multiple streaming-mode changes.
+define void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline
+; CHECK-SAME: () #[[ATTR2]] {
+; CHECK-NEXT: call void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_multiple_streaming_callees()
+ call void @streaming_caller_to_nonstreaming_callee_with_multiple_nonstreaming_callees_dont_inline()
ret void
}
declare void @streaming_compatible_body() "aarch64_pstate_sm_compatible"
-define void @streaming_caller_single_streaming_compatible_callee() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_compatible_callee
-; CHECK-SAME: () #[[ATTR7]] {
+define void @nonstreaming_caller_single_streaming_compatible_callee() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_single_streaming_compatible_callee
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
;
@@ -641,9 +641,9 @@ define void @streaming_caller_single_streaming_compatible_callee() #0 "aarch64_
ret void
}
-define void @streaming_caller_multiple_streaming_compatible_callees() #0 "aarch64_pstate_sm_enabled" {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_compatible_callees
-; CHECK-SAME: () #[[ATTR7]] {
+define void @nonstreaming_caller_multiple_streaming_compatible_callees() #0 {
+; CHECK-LABEL: define void @nonstreaming_caller_multiple_streaming_compatible_callees
+; CHECK-SAME: () #[[ATTR1]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
@@ -654,25 +654,67 @@ define void @streaming_caller_multiple_streaming_compatible_callees() #0 "aarch
}
; Allow inlining, as inlining would remove a streaming-mode change.
-define void @streaming_caller_single_streaming_compatible_callee_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_single_streaming_compatible_callee_inline
-; CHECK-SAME: () #[[ATTR6]] {
+define void @streaming_caller_to_nonstreaming_callee_with_single_streamingcompatible_callee_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_single_streamingcompatible_callee_inline
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_single_streaming_compatible_callee()
+ call void @nonstreaming_caller_single_streaming_compatible_callee()
ret void
}
-; Allow inlining, as inline would remove several stremaing-mode changes.
-define void @streaming_caller_multiple_streaming_compatible_callees_inline() #0 {
-; CHECK-LABEL: define void @streaming_caller_multiple_streaming_compatible_callees_inline
-; CHECK-SAME: () #[[ATTR6]] {
+; Allow inlining, as inlining would remove several streaming-mode changes.
+define void @streaming_caller_to_nonstreaming_callee_with_multiple_streamingcompatible_callees_inline() #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @streaming_caller_to_nonstreaming_callee_with_multiple_streamingcompatible_callees_inline
+; CHECK-SAME: () #[[ATTR2]] {
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: call void @streaming_compatible_body()
; CHECK-NEXT: ret void
;
- call void @streaming_caller_multiple_streaming_compatible_callees()
+ call void @nonstreaming_caller_multiple_streaming_compatible_callees()
+ ret void
+}
+
+define void @simple_streaming_function(ptr %ptr) #0 "aarch64_pstate_sm_enabled" {
+; CHECK-LABEL: define void @simple_streaming_function
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[PTR]], align 16
+; CHECK-NEXT: ret void
+;
+ store <vscale x 4 x i32> zeroinitializer, ptr %ptr
+ ret void
+}
+
+; Don't allow inlining a streaming function into a non-streaming function.
+define void @non_streaming_caller_streaming_callee_dont_inline(ptr %ptr) #0 {
+; CHECK-LABEL: define void @non_streaming_caller_streaming_callee_dont_inline
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @simple_streaming_function(ptr [[PTR]])
+; CHECK-NEXT: ret void
+;
+ call void @simple_streaming_function(ptr %ptr)
+ ret void
+}
+
+define void @simple_locally_streaming_function(ptr %ptr) #0 "aarch64_pstate_sm_body" {
+; CHECK-LABEL: define void @simple_locally_streaming_function
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: store <vscale x 4 x i32> zeroinitializer, ptr [[PTR]], align 16
+; CHECK-NEXT: ret void
+;
+ store <vscale x 4 x i32> zeroinitializer, ptr %ptr
+ ret void
+}
+
+; Don't allow inlining a locally-streaming function into a non-streaming function.
+define void @non_streaming_caller_locally_streaming_callee_dont_inline(ptr %ptr) #0 {
+; CHECK-LABEL: define void @non_streaming_caller_locally_streaming_callee_dont_inline
+; CHECK-SAME: (ptr [[PTR:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: call void @simple_locally_streaming_function(ptr [[PTR]])
+; CHECK-NEXT: ret void
+;
+ call void @simple_locally_streaming_function(ptr %ptr)
ret void
}
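
At the C level these IR attributes map onto the ACLE SME keywords, so the caller/callee matrix above corresponds roughly to the following sketch (an assumption about the source-level spelling; compiling it requires an SME-enabled target):

/* Rough ACLE spellings of the attributes used above:
 *   aarch64_pstate_sm_enabled    -> __arm_streaming
 *   aarch64_pstate_sm_compatible -> __arm_streaming_compatible
 *   aarch64_pstate_sm_body       -> __arm_locally_streaming */
void streaming_callee(void) __arm_streaming;
void streaming_compatible_callee(void) __arm_streaming_compatible;

void streaming_caller(void) __arm_streaming {
  streaming_callee(); /* same mode: no mode change, inlining is safe */
}

void normal_caller(void) {
  streaming_callee(); /* requires an smstart/smstop pair around the call */
}
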
diff --git a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
index 934852d..02042b1 100644
--- a/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
+++ b/llvm/test/Transforms/InstCombine/fpclass-from-dom-cond.ll
@@ -131,10 +131,10 @@ define i1 @test5(double %x, i1 %cond) {
; CHECK: if.then:
; CHECK-NEXT: ret i1 false
; CHECK: if.end:
+; CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[X]], i32 408)
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
-; CHECK-NEXT: [[Y:%.*]] = phi double [ -1.000000e+00, [[ENTRY:%.*]] ], [ [[X]], [[IF_END]] ]
-; CHECK-NEXT: [[RET:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[Y]], i32 408)
+; CHECK-NEXT: [[RET:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TMP0]], [[IF_END]] ]
; CHECK-NEXT: ret i1 [[RET]]
;
entry:
@@ -391,11 +391,9 @@ define float @test_signbit_check_fail(float %x, i1 %cond) {
; CHECK: if.else:
; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN2:%.*]], label [[IF_END]]
; CHECK: if.then2:
-; CHECK-NEXT: [[FNEG2:%.*]] = fneg float [[X]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
-; CHECK-NEXT: [[VALUE:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[FNEG2]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
-; CHECK-NEXT: [[RET:%.*]] = call float @llvm.fabs.f32(float [[VALUE]])
+; CHECK-NEXT: [[RET:%.*]] = phi float [ [[FNEG]], [[IF_THEN1]] ], [ [[X]], [[IF_THEN2]] ], [ [[X]], [[IF_ELSE]] ]
; CHECK-NEXT: ret float [[RET]]
;
%i32 = bitcast float %x to i32
diff --git a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
index a92e0c2..e2f22b8 100644
--- a/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
+++ b/llvm/test/Transforms/InstCombine/gepofconstgepi8.ll
@@ -293,3 +293,183 @@ entry:
%p2 = getelementptr <vscale x 4 x i32>, ptr %p1, i64 %index
ret ptr %p2
}
+
+define ptr @test_all_nuw(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_nuw(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_nuw1(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_nuw1(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_nuw2(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_nuw2(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_nuw3(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_nuw3(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_nuw_disjoint(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_nuw_disjoint(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = or disjoint i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_nuw(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_nuw(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_inbounds1(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_inbounds1(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_partial_inbounds2(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_partial_inbounds2(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_partial_nuw1(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_partial_nuw1(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 7
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds i8, ptr %base, i64 -1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_partial_nuw2(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_partial_nuw2(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add nuw i64 %a, 2
+ %p2 = getelementptr inbounds i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_inbounds_partial_nuw3(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_inbounds_partial_nuw3(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr inbounds nuw i8, ptr %base, i64 1
+ %index = add i64 %a, 2
+ %p2 = getelementptr inbounds nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
+
+define ptr @test_all_nusw_nuw(ptr %base, i64 %a) {
+; CHECK-LABEL: define ptr @test_all_nusw_nuw(
+; CHECK-SAME: ptr [[BASE:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr nuw i8, ptr [[BASE]], i64 9
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i32, ptr [[TMP0]], i64 [[A]]
+; CHECK-NEXT: ret ptr [[P2]]
+;
+entry:
+ %p1 = getelementptr nusw nuw i8, ptr %base, i64 1
+ %index = add nsw nuw i64 %a, 2
+ %p2 = getelementptr nusw nuw i32, ptr %p1, i64 %index
+ ret ptr %p2
+}
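
Each of these folds is constant-offset algebra: the inner gep contributes 1 byte, the outer i32 gep contributes 4 * (a + 2) bytes, and the two constants merge into the single offset 9 whenever the wrap flags allow it. The arithmetic itself is easy to sanity-check:

#include <assert.h>
#include <stdint.h>

int main(void) {
  /* inner gep: +1 byte; outer gep: 4-byte i32 elements at index a + 2.
   * InstCombine rewrites this to +9 bytes plus an i32 gep at index a. */
  for (int64_t a = -100; a <= 100; ++a)
    assert(1 + 4 * (a + 2) == 9 + 4 * a);
  return 0;
}
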
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 7c1342a..55b5b5e 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -356,7 +356,7 @@ define i1 @test13_i16(i16 %X, ptr %P) {
define i1 @test13_i128(i128 %X, ptr %P) {
; CHECK-LABEL: @test13_i128(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[X:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[X:%.*]] to i64
; CHECK-NEXT: [[C:%.*]] = icmp eq i64 [[TMP1]], -1
; CHECK-NEXT: ret i1 [[C]]
;
@@ -412,7 +412,7 @@ define ptr @test_index_canon_inbounds(ptr %X, i32 %Idx) {
define ptr @test_index_canon_nusw_nuw(ptr %X, i32 %Idx) {
; CHECK-LABEL: @test_index_canon_nusw_nuw(
-; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[IDX:%.*]] to i64
; CHECK-NEXT: [[R:%.*]] = getelementptr nusw nuw i32, ptr [[X:%.*]], i64 [[TMP1]]
; CHECK-NEXT: ret ptr [[R]]
;
@@ -568,7 +568,7 @@ define i32 @test20(ptr %P, i32 %A, i32 %B) {
define i32 @test20_as1(ptr addrspace(1) %P, i32 %A, i32 %B) {
; CHECK-LABEL: @test20_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[A:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i32 [[A:%.*]] to i16
; CHECK-NEXT: [[T6:%.*]] = icmp eq i16 [[TMP1]], 0
; CHECK-NEXT: [[T7:%.*]] = zext i1 [[T6]] to i32
; CHECK-NEXT: ret i32 [[T7]]
@@ -1978,4 +1978,94 @@ define ptr @gep_merge_nusw_const(ptr %p, i64 %idx, i64 %idx2) {
ret ptr %gep
}
+define ptr @gep_index_trunc_nothing(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nothing(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nuw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nusw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nusw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_inbounds(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_inbounds(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr inbounds i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_trunc_nusw_nuw(ptr %p, i128 %idx) {
+; CHECK-LABEL: @gep_index_trunc_nusw_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i128 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw nuw i8, ptr %p, i128 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nothing(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nothing(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nuw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nusw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nusw(
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
+define ptr @gep_index_ext_nusw_nuw(ptr %p, i32 %idx) {
+; CHECK-LABEL: @gep_index_ext_nusw_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[IDX:%.*]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr nusw nuw i8, ptr [[P:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+ %gep = getelementptr nusw nuw i8, ptr %p, i32 %idx
+ ret ptr %gep
+}
+
!0 = !{!"branch_weights", i32 2, i32 10}
diff --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index 1296dc6..f873551 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -39,9 +39,9 @@ define i1 @test59_as1(ptr addrspace(1) %foo) {
define i1 @test60(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i32
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i32 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -53,9 +53,9 @@ define i1 @test60(ptr %foo, i64 %i, i64 %j) {
define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i16
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i16
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -69,7 +69,7 @@ define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; bitcast. This uses the same sized addrspace.
define i1 @test60_addrspacecast(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[J:%.*]] to i32
; CHECK-NEXT: [[I_TR:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[I_TR]], 2
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP2]], [[TMP1]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-gep.ll b/llvm/test/Transforms/InstCombine/icmp-gep.ll
index 5044850ec..1385dc3 100644
--- a/llvm/test/Transforms/InstCombine/icmp-gep.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-gep.ll
@@ -286,8 +286,7 @@ define i1 @PR8882(i64 %i) {
define i1 @test24_as1(i64 %i) {
; CHECK-LABEL: @test24_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[I:%.*]], 65535
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], 1000
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1:%.*]], 1000
; CHECK-NEXT: ret i1 [[CMP]]
;
%p1 = getelementptr inbounds i32, ptr addrspace(1) @X_as1, i64 %i
@@ -449,9 +448,9 @@ define i1 @test_gep_eq_no_inbounds(ptr %foo, i64 %i, i64 %j) {
define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[I:%.*]] to i16
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
+; CHECK-NEXT: [[TMP2:%.*]] = trunc nsw i64 [[J:%.*]] to i16
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
index 53c9736..07486ff 100644
--- a/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
+++ b/llvm/test/Transforms/InstCombine/indexed-gep-compares.ll
@@ -376,7 +376,7 @@ define i1 @test8(ptr %in, i64 %offset) {
; CHECK-NEXT: [[LD:%.*]] = load i64, ptr [[IN:%.*]], align 8
; CHECK-NEXT: [[TMP0:%.*]] = trunc i64 [[LD]] to i32
; CHECK-NEXT: [[CASTI8:%.*]] = inttoptr i32 [[TMP0]] to ptr
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[OFFSET:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[OFFSET:%.*]] to i32
; CHECK-NEXT: [[GEPI8:%.*]] = getelementptr inbounds i8, ptr [[CASTI8]], i32 [[TMP1]]
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[LD]] to i32
; CHECK-NEXT: [[PTRCAST:%.*]] = inttoptr i32 [[TMP2]] to ptr
diff --git a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
index c05cca9..ac44e6c 100644
--- a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
+++ b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
@@ -261,14 +261,11 @@ define i8 @knownbits_umax_select_test() {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[CONTAIN:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[COND0:%.*]] = call i1 @cond()
-; CHECK-NEXT: [[CONTAIN]] = call i8 @llvm.umax.i8(i8 [[INDVAR]], i8 1)
; CHECK-NEXT: [[COND1:%.*]] = call i1 @cond()
; CHECK-NEXT: br i1 [[COND1]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
-; CHECK-NEXT: [[BOOL:%.*]] = and i8 [[CONTAIN]], 1
-; CHECK-NEXT: ret i8 [[BOOL]]
+; CHECK-NEXT: ret i8 1
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index ccaf31f..df34e7d 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -293,8 +293,7 @@ define i1 @test10_struct_arr_i16(i16 %x) {
define i1 @test10_struct_arr_i64(i64 %x) {
; CHECK-LABEL: @test10_struct_arr_i64(
-; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 4294967295
-; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1]], 1
+; CHECK-NEXT: [[R:%.*]] = icmp ne i64 [[TMP1:%.*]], 1
; CHECK-NEXT: ret i1 [[R]]
;
%p = getelementptr inbounds [4 x %Foo], ptr @GStructArr, i64 0, i64 %x, i32 2
@@ -331,7 +330,7 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
define i1 @pr93017(i64 %idx) {
; CHECK-LABEL: @pr93017(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[IDX:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[IDX:%.*]] to i32
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr @table, i32 0, i32 [[TMP1]]
; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[GEP]], align 4
; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[V]], null
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 4756b4f..3454835 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -2998,3 +2998,64 @@ join:
%cmp = icmp eq i32 %phi, 0
ret i1 %cmp
}
+
+declare void @may_exit()
+
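+; The umax over the phi is pushed into the predecessors: the %if edge keeps
+; umax(%a, 1) and the %entry edge folds to umax(0, 1) == 1, so the phi itself
+; becomes the result even though @may_exit() sits between the phi and the call.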
+define i32 @intrinsic_over_phi_noundef(i1 %c, i1 %c2, i32 %a) {
+; CHECK-LABEL: @intrinsic_over_phi_noundef(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.umax.i32(i32 [[A:%.*]], i32 1)
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[TMP0]], [[IF]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT: call void @may_exit()
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c, label %if, label %join
+
+if:
+ br label %join
+
+join:
+ %phi = phi i32 [ %a, %if ], [ 0, %entry ]
+ call void @may_exit()
+ %umax = call noundef i32 @llvm.umax(i32 noundef %phi, i32 1)
+ ret i32 %umax
+}
+
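+; Both identical fshl rotates of the multi-use phi are rewritten in the %if
+; predecessor, where the rotate of %add is doubled via shl 1; the %entry edge
+; folds to fshl(0, 0, 29) + fshl(0, 0, 29) == 0, so the phi yields the sum.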
+define i32 @multiple_intrinsics_with_multiple_phi_uses(i1 %c, i32 %arg) {
+; CHECK-LABEL: @multiple_intrinsics_with_multiple_phi_uses(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[IF_END:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ARG:%.*]], -8
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.fshl.i32(i32 [[ADD]], i32 [[ADD]], i32 29)
+; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[TMP0]], 1
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[TMP1]], [[IF]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c, label %if, label %if.end
+
+if:
+ %add = add i32 %arg, -8
+ br label %if.end
+
+if.end:
+ %phi = phi i32 [ %add, %if ], [ 0, %entry ]
+ %fshl1 = call i32 @llvm.fshl.i32(i32 %phi, i32 %phi, i32 29)
+ %fshl2 = call i32 @llvm.fshl.i32(i32 %phi, i32 %phi, i32 29)
+ %add2 = add i32 %fshl1, %fshl2
+ ret i32 %add2
+}
diff --git a/llvm/test/Transforms/InstCombine/pr39908.ll b/llvm/test/Transforms/InstCombine/pr39908.ll
index 5d13a33..c36495d 100644
--- a/llvm/test/Transforms/InstCombine/pr39908.ll
+++ b/llvm/test/Transforms/InstCombine/pr39908.ll
@@ -19,7 +19,7 @@ define i1 @test(ptr %p, i32 %n) {
; Same test using 64-bit indices.
define i1 @test64(ptr %p, i64 %n) {
; CHECK-LABEL: @test64(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[N:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -32,7 +32,7 @@ define i1 @test64(ptr %p, i64 %n) {
; Here the offset overflows and is treated modulo 2^32. This is UB.
define i1 @test64_overflow(ptr %p, i64 %n) {
; CHECK-LABEL: @test64_overflow(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[N:%.*]] to i32
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[N:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 1
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
index c637481..86e586e 100644
--- a/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/recurrence-binary-intrinsic.ll
@@ -8,12 +8,11 @@ define i8 @simple_recurrence_intrinsic_smax(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SMAX_ACC:%.*]] = phi i8 [ [[SMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[SMAX]] = call i8 @llvm.smax.i8(i8 [[SMAX_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SMAX:%.*]] = call i8 @llvm.smax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[SMAX]]
;
entry:
@@ -38,12 +37,11 @@ define i8 @simple_recurrence_intrinsic_smin(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[SMIN_ACC:%.*]] = phi i8 [ [[SMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[SMIN]] = call i8 @llvm.smin.i8(i8 [[SMIN_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[SMIN:%.*]] = call i8 @llvm.smin.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[SMIN]]
;
entry:
@@ -68,12 +66,11 @@ define i8 @simple_recurrence_intrinsic_umax(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[UMAX_ACC:%.*]] = phi i8 [ [[UMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[UMAX]] = call i8 @llvm.umax.i8(i8 [[UMAX_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[UMAX:%.*]] = call i8 @llvm.umax.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[UMAX]]
;
entry:
@@ -98,12 +95,11 @@ define i8 @simple_recurrence_intrinsic_umin(i8 %n, i8 %a, i8 %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[UMIN_ACC:%.*]] = phi i8 [ [[UMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[UMIN]] = call i8 @llvm.umin.i8(i8 [[UMIN_ACC]], i8 [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i8 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[UMIN:%.*]] = call i8 @llvm.umin.i8(i8 [[A]], i8 [[B]])
; CHECK-NEXT: ret i8 [[UMIN]]
;
entry:
@@ -128,12 +124,11 @@ define float @simple_recurrence_intrinsic_maxnum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call float @llvm.maxnum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call float @llvm.maxnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -157,12 +152,11 @@ define float @simple_recurrence_intrinsic_minnum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call float @llvm.minnum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call float @llvm.minnum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
@@ -186,12 +180,11 @@ define float @simple_recurrence_intrinsic_maximum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX_ACC:%.*]] = phi float [ [[FMAX:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMAX]] = call nnan float @llvm.maximum.f32(float [[FMAX_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMAX:%.*]] = call nnan float @llvm.maximum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMAX]]
;
entry:
@@ -215,12 +208,11 @@ define float @simple_recurrence_intrinsic_minimum(i32 %n, float %a, float %b) {
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN_ACC:%.*]] = phi float [ [[FMIN:%.*]], %[[LOOP]] ], [ [[A]], %[[ENTRY]] ]
-; CHECK-NEXT: [[FMIN]] = call nnan float @llvm.minimum.f32(float [[FMIN_ACC]], float [[B]])
; CHECK-NEXT: [[IV_NEXT]] = add nuw i32 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[FMIN:%.*]] = call nnan float @llvm.minimum.f32(float [[A]], float [[B]])
; CHECK-NEXT: ret float [[FMIN]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll b/llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll
new file mode 100644
index 0000000..1e089e1
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/scalable-extract-subvec-elt.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
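+; An extractelement from a subvector taken at offset 0 reads the same lane as
+; an extractelement from the source vector, so the llvm.vector.extract should
+; fold away for both constant and variable element indices.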
+define i1 @extract_const_idx(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: define i1 @extract_const_idx(
+; CHECK-SAME: <vscale x 4 x i1> [[A:%.*]]) {
+; CHECK-NEXT: [[ELT:%.*]] = extractelement <vscale x 4 x i1> [[A]], i64 1
+; CHECK-NEXT: ret i1 [[ELT]]
+;
+ %subvec = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1.nxv4i1.i64(<vscale x 4 x i1> %a, i64 0)
+ %elt = extractelement <vscale x 2 x i1> %subvec, i32 1
+ ret i1 %elt
+}
+
+define float @extract_variable_idx(<vscale x 4 x float> %a, i32 %idx) {
+; CHECK-LABEL: define float @extract_variable_idx(
+; CHECK-SAME: <vscale x 4 x float> [[A:%.*]], i32 [[IDX:%.*]]) {
+; CHECK-NEXT: [[ELT:%.*]] = extractelement <vscale x 4 x float> [[A]], i32 [[IDX]]
+; CHECK-NEXT: ret float [[ELT]]
+;
+ %subvec = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32.i64(<vscale x 4 x float> %a, i64 0)
+ %elt = extractelement <vscale x 2 x float> %subvec, i32 %idx
+ ret float %elt
+}
+
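+; The fold only handles subvectors starting at element 0, so with a non-zero
+; offset the llvm.vector.extract is left in place.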
+define float @negative_test(<vscale x 4 x float> %a) {
+; CHECK-LABEL: define float @negative_test(
+; CHECK-SAME: <vscale x 4 x float> [[A:%.*]]) {
+; CHECK-NEXT: [[SUBVEC:%.*]] = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[A]], i64 2)
+; CHECK-NEXT: [[ELT:%.*]] = extractelement <vscale x 2 x float> [[SUBVEC]], i64 1
+; CHECK-NEXT: ret float [[ELT]]
+;
+ %subvec = call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv4f32.i64(<vscale x 4 x float> %a, i64 2)
+ %elt = extractelement <vscale x 2 x float> %subvec, i32 1
+ ret float %elt
+}
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 45e5686..0db5cbe 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -394,7 +394,7 @@ define i64 @negative_ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) {
define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) {
; CHECK-LABEL: @test25_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16
+; CHECK-NEXT: [[TMP1:%.*]] = trunc nsw i64 [[A:%.*]] to i16
; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i16 [[TMP1]], 1
; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i16 [[B_IDX]], -84
; CHECK-NEXT: ret i16 [[GEPDIFF]]
diff --git a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll
index 75b8509..6eed7f8 100644
--- a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll
+++ b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-unary-arithmetic.ll
@@ -416,6 +416,57 @@ define float @test_round_ftz_f_neg_1_5() {
ret float %res
}
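+; The 2.5 cases document the tie-breaking behavior: nvvm.round rounds ties to
+; the nearest even integer (2.5 -> 2.0, -2.5 -> -2.0), unlike libm round(),
+; which rounds ties away from zero.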
+define double @test_round_d_2_5() {
+; CHECK-LABEL: define double @test_round_d_2_5() {
+; CHECK-NEXT: ret double 2.000000e+00
+;
+ %res = call double @llvm.nvvm.round.d(double 2.5)
+ ret double %res
+}
+
+define float @test_round_f_2_5() {
+; CHECK-LABEL: define float @test_round_f_2_5() {
+; CHECK-NEXT: ret float 2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.f(float 2.5)
+ ret float %res
+}
+
+define float @test_round_ftz_f_2_5() {
+; CHECK-LABEL: define float @test_round_ftz_f_2_5() {
+; CHECK-NEXT: ret float 2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.ftz.f(float 2.5)
+ ret float %res
+}
+
+define double @test_round_d_neg_2_5() {
+; CHECK-LABEL: define double @test_round_d_neg_2_5() {
+; CHECK-NEXT: ret double -2.000000e+00
+;
+ %res = call double @llvm.nvvm.round.d(double -2.5)
+ ret double %res
+}
+
+define float @test_round_f_neg_2_5() {
+; CHECK-LABEL: define float @test_round_f_neg_2_5() {
+; CHECK-NEXT: ret float -2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.f(float -2.5)
+ ret float %res
+}
+
+define float @test_round_ftz_f_neg_2_5() {
+; CHECK-LABEL: define float @test_round_ftz_f_neg_2_5() {
+; CHECK-NEXT: ret float -2.000000e+00
+;
+ %res = call float @llvm.nvvm.round.ftz.f(float -2.5)
+ ret float %res
+}
+
define double @test_round_d_neg_subnorm() {
; CHECK-LABEL: define double @test_round_d_neg_subnorm() {
; CHECK-NEXT: ret double -0.000000e+00
diff --git a/llvm/test/Transforms/LICM/gep-reassociate.ll b/llvm/test/Transforms/LICM/gep-reassociate.ll
index 630a751..0090c76 100644
--- a/llvm/test/Transforms/LICM/gep-reassociate.ll
+++ b/llvm/test/Transforms/LICM/gep-reassociate.ll
@@ -39,11 +39,13 @@ exit:
ret void
}
-define void @both_inbounds_one_neg(ptr %ptr, i1 %c) {
+define void @both_inbounds_one_neg(ptr %ptr, i1 %c, i64 %neg) {
; CHECK-LABEL: define void @both_inbounds_one_neg
-; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]]) {
+; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]], i64 [[NEG:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 -1
+; CHECK-NEXT: [[IS_NEG:%.*]] = icmp slt i64 [[NEG]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[IS_NEG]])
+; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[NEG]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[VAL:%.*]] = call i32 @get.i32()
@@ -55,13 +57,15 @@ define void @both_inbounds_one_neg(ptr %ptr, i1 %c) {
; CHECK-NEXT: ret void
;
entry:
+ %is.neg = icmp slt i64 %neg, 0
+ call void @llvm.assume(i1 %is.neg)
br label %loop
loop:
%val = call i32 @get.i32()
%val.ext = zext i32 %val to i64
%ptr2 = getelementptr inbounds i8, ptr %ptr, i64 %val.ext
- %ptr3 = getelementptr i8, ptr %ptr2, i64 -1
+ %ptr3 = getelementptr i8, ptr %ptr2, i64 %neg
call void @use(ptr %ptr3)
br i1 %c, label %loop, label %exit
@@ -69,11 +73,13 @@ exit:
ret void
}
-define void @both_inbounds_pos(ptr %ptr, i1 %c) {
+define void @both_inbounds_pos(ptr %ptr, i1 %c, i64 %nonneg) {
; CHECK-LABEL: define void @both_inbounds_pos
-; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]]) {
+; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]], i64 [[NONNEG:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 1
+; CHECK-NEXT: [[IS_NONNEG:%.*]] = icmp sge i64 [[NONNEG]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[IS_NONNEG]])
+; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[NONNEG]]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[VAL:%.*]] = call i32 @get.i32()
@@ -85,13 +91,15 @@ define void @both_inbounds_pos(ptr %ptr, i1 %c) {
; CHECK-NEXT: ret void
;
entry:
+ %is.nonneg = icmp sge i64 %nonneg, 0
+ call void @llvm.assume(i1 %is.nonneg)
br label %loop
loop:
%val = call i32 @get.i32()
%val.ext = zext i32 %val to i64
%ptr2 = getelementptr inbounds i8, ptr %ptr, i64 %val.ext
- %ptr3 = getelementptr inbounds i8, ptr %ptr2, i64 1
+ %ptr3 = getelementptr inbounds i8, ptr %ptr2, i64 %nonneg
call void @use(ptr %ptr3)
br i1 %c, label %loop, label %exit
@@ -440,3 +448,34 @@ exit:
exit:
ret void
}
+
+; Do not reassociate constant offset GEP.
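+; A constant offset is already loop-invariant and typically folds into the
+; addressing mode, so hoisting an invariant GEP for it would gain nothing.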
+define void @constant_offset(ptr %ptr, i1 %c) {
+; CHECK-LABEL: define void @constant_offset
+; CHECK-SAME: (ptr [[PTR:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[VAL:%.*]] = call i64 @get.i64()
+; CHECK-NEXT: [[GEP_BASE:%.*]] = getelementptr i8, ptr [[PTR]], i64 [[VAL]]
+; CHECK-NEXT: [[GEP_OFF:%.*]] = getelementptr i8, ptr [[GEP_BASE]], i64 1
+; CHECK-NEXT: call void @use(ptr [[GEP_OFF]])
+; CHECK-NEXT: br i1 [[C]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %val = call i64 @get.i64()
+ %gep.base = getelementptr i8, ptr %ptr, i64 %val
+ %gep.off = getelementptr i8, ptr %gep.base, i64 1
+ call void @use(ptr %gep.off)
+ br i1 %c, label %loop, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
index 65aaf72..cd35401 100644
--- a/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
+++ b/llvm/test/Transforms/LoopIdiom/reuse-lcssa-phi-scev-expansion.ll
@@ -33,11 +33,10 @@ define void @scev_expand_ptrtoint(i8 %x, ptr %start) {
; CHECK: [[LOOP_3_PREHEADER]]:
; CHECK-NEXT: [[INDVAR_LCSSA:%.*]] = phi i64 [ [[INDVAR]], %[[LOOP_2_HEADER]] ], [ [[INDVAR]], %[[LOOP_2_HEADER]] ]
; CHECK-NEXT: [[PTR_IV_2_LCSSA:%.*]] = phi ptr [ [[PTR_IV_2]], %[[LOOP_2_HEADER]] ], [ [[PTR_IV_2]], %[[LOOP_2_HEADER]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = sub i64 0, [[START1]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 1, [[START1]]
; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR_IV_1_LCSSA]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP0]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 1
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[CMP_EXT]], [[TMP3]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[CMP_EXT]], [[TMP2]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDVAR_LCSSA]], [[TMP4]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP5]]
; CHECK-NEXT: [[STRLEN:%.*]] = call i64 @strlen(ptr [[SCEVGEP]])
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
index aa954aeb..9003072 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
@@ -383,14 +383,14 @@ define void @vscale_squared_offset(ptr %alloc) #0 {
; COMMON-LABEL: vscale_squared_offset:
; COMMON: // %bb.0: // %entry
; COMMON-NEXT: rdvl x9, #1
+; COMMON-NEXT: rdvl x10, #4
; COMMON-NEXT: fmov z0.s, #4.00000000
-; COMMON-NEXT: mov x8, xzr
; COMMON-NEXT: lsr x9, x9, #4
; COMMON-NEXT: fmov z1.s, #8.00000000
-; COMMON-NEXT: cntw x10
+; COMMON-NEXT: mov x8, xzr
; COMMON-NEXT: ptrue p0.s, vl1
-; COMMON-NEXT: umull x9, w9, w9
-; COMMON-NEXT: lsl x9, x9, #6
+; COMMON-NEXT: umull x9, w9, w10
+; COMMON-NEXT: cntw x10
; COMMON-NEXT: cmp x8, x10
; COMMON-NEXT: b.ge .LBB6_2
; COMMON-NEXT: .LBB6_1: // %for.body
diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
index 8baded8..38d559f 100644
--- a/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
+++ b/llvm/test/Transforms/LoopUnroll/AArch64/vector.ll
@@ -485,12 +485,206 @@ exit: ; preds = %vector.body
!0 = !{!0, !1}
!1 = !{!"llvm.loop.isvectorized", i32 1}
+; On Cortex-A55 we should runtime unroll the scalar epilogue loop, but not the
+; vector loop.
+define void @scalar_epilogue(ptr %p, i8 %splat.scalar, i64 %n) {
+; APPLE-LABEL: define void @scalar_epilogue(
+; APPLE-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; APPLE-NEXT: [[ENTRY:.*]]:
+; APPLE-NEXT: [[MIN_ITERS_CHECK7:%.*]] = icmp ult i64 [[N]], 32
+; APPLE-NEXT: br i1 [[MIN_ITERS_CHECK7]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; APPLE: [[VECTOR_PH]]:
+; APPLE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; APPLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; APPLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; APPLE-NEXT: br label %[[VECTOR_BODY:.*]]
+; APPLE: [[VECTOR_BODY]]:
+; APPLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; APPLE-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INDEX]]
+; APPLE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
+; APPLE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; APPLE-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; APPLE-NEXT: [[TMP2:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; APPLE-NEXT: [[TMP3:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
+; APPLE-NEXT: store <16 x i8> [[TMP2]], ptr [[TMP0]], align 1
+; APPLE-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP1]], align 1
+; APPLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; APPLE-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; APPLE-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; APPLE: [[MIDDLE_BLOCK]]:
+; APPLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; APPLE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; APPLE: [[SCALAR_REMAINDER_PREHEADER]]:
+; APPLE-NEXT: [[IV_SCALAR_LOOP_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; APPLE-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; APPLE: [[SCALAR_REMAINDER]]:
+; APPLE-NEXT: [[I_06:%.*]] = phi i64 [ [[INC:%.*]], %[[SCALAR_REMAINDER]] ], [ [[IV_SCALAR_LOOP_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ]
+; APPLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06]]
+; APPLE-NEXT: [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; APPLE-NEXT: [[ADD:%.*]] = add i8 [[TMP8]], [[SPLAT_SCALAR]]
+; APPLE-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; APPLE-NEXT: [[INC]] = add nuw i64 [[I_06]], 1
+; APPLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; APPLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP5:![0-9]+]]
+; APPLE: [[EXIT_LOOPEXIT]]:
+; APPLE-NEXT: br label %[[EXIT]]
+; APPLE: [[EXIT]]:
+; APPLE-NEXT: ret void
+;
+; CORTEXA55-LABEL: define void @scalar_epilogue(
+; CORTEXA55-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CORTEXA55-NEXT: [[ENTRY:.*]]:
+; CORTEXA55-NEXT: [[MIN_ITERS_CHECK7:%.*]] = icmp ult i64 [[N]], 32
+; CORTEXA55-NEXT: br i1 [[MIN_ITERS_CHECK7]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; CORTEXA55: [[VECTOR_PH]]:
+; CORTEXA55-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; CORTEXA55-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; CORTEXA55-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CORTEXA55-NEXT: br label %[[VECTOR_BODY:.*]]
+; CORTEXA55: [[VECTOR_BODY]]:
+; CORTEXA55-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CORTEXA55-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INDEX]]
+; CORTEXA55-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16
+; CORTEXA55-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; CORTEXA55-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; CORTEXA55-NEXT: [[TMP2:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CORTEXA55-NEXT: [[TMP3:%.*]] = add <16 x i8> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
+; CORTEXA55-NEXT: store <16 x i8> [[TMP2]], ptr [[TMP0]], align 1
+; CORTEXA55-NEXT: store <16 x i8> [[TMP3]], ptr [[TMP1]], align 1
+; CORTEXA55-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CORTEXA55-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CORTEXA55-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CORTEXA55: [[MIDDLE_BLOCK]]:
+; CORTEXA55-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CORTEXA55-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; CORTEXA55: [[SCALAR_REMAINDER_PREHEADER]]:
+; CORTEXA55-NEXT: [[I_06_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CORTEXA55-NEXT: [[TMP8:%.*]] = sub i64 [[N]], [[I_06_PH]]
+; CORTEXA55-NEXT: [[TMP9:%.*]] = add i64 [[N]], -1
+; CORTEXA55-NEXT: [[TMP10:%.*]] = sub i64 [[TMP9]], [[I_06_PH]]
+; CORTEXA55-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP8]], 3
+; CORTEXA55-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; CORTEXA55-NEXT: br i1 [[LCMP_MOD]], label %[[SCALAR_REMAINDER_PROL_PREHEADER:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_PREHEADER]]:
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06_PH]]
+; CORTEXA55-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX_PROL]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL:%.*]] = add i8 [[TMP11]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL]], ptr [[ARRAYIDX_PROL]], align 1
+; CORTEXA55-NEXT: [[INC_PROL:%.*]] = add nuw i64 [[I_06_PH]], 1
+; CORTEXA55-NEXT: [[PROL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; CORTEXA55-NEXT: br i1 [[PROL_ITER_CMP]], label %[[SCALAR_REMAINDER_PROL_1:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_1]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL]]
+; CORTEXA55-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX_PROL_1]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL_1:%.*]] = add i8 [[TMP12]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL_1]], ptr [[ARRAYIDX_PROL_1]], align 1
+; CORTEXA55-NEXT: [[INC_PROL_1:%.*]] = add nuw i64 [[I_06_PH]], 2
+; CORTEXA55-NEXT: [[PROL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; CORTEXA55-NEXT: br i1 [[PROL_ITER_CMP_1]], label %[[SCALAR_REMAINDER_PROL_2:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_2]]:
+; CORTEXA55-NEXT: [[ARRAYIDX_PROL_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_1]]
+; CORTEXA55-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX_PROL_2]], align 1
+; CORTEXA55-NEXT: [[ADD_PROL_2:%.*]] = add i8 [[TMP13]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_PROL_2]], ptr [[ARRAYIDX_PROL_2]], align 1
+; CORTEXA55-NEXT: [[INC_PROL_2:%.*]] = add nuw i64 [[I_06_PH]], 3
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]:
+; CORTEXA55-NEXT: [[IV_SCALAR_LOOP_UNR_PH:%.*]] = phi i64 [ [[INC_PROL]], %[[SCALAR_REMAINDER_PROL]] ], [ [[INC_PROL_1]], %[[SCALAR_REMAINDER_PROL_1]] ], [ [[INC_PROL_2]], %[[SCALAR_REMAINDER_PROL_2]] ]
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT]]
+; CORTEXA55: [[SCALAR_REMAINDER_PROL_LOOPEXIT]]:
+; CORTEXA55-NEXT: [[IV_SCALAR_LOOP_UNR:%.*]] = phi i64 [ [[I_06_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ], [ [[IV_SCALAR_LOOP_UNR_PH]], %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]] ]
+; CORTEXA55-NEXT: [[TMP14:%.*]] = icmp ult i64 [[TMP10]], 3
+; CORTEXA55-NEXT: br i1 [[TMP14]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER_NEW:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER_PREHEADER_NEW]]:
+; CORTEXA55-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; CORTEXA55: [[SCALAR_REMAINDER]]:
+; CORTEXA55-NEXT: [[I_06:%.*]] = phi i64 [ [[IV_SCALAR_LOOP_UNR]], %[[SCALAR_REMAINDER_PREHEADER_NEW]] ], [ [[INC_3:%.*]], %[[SCALAR_REMAINDER]] ]
+; CORTEXA55-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[I_06]]
+; CORTEXA55-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CORTEXA55-NEXT: [[ADD:%.*]] = add i8 [[TMP15]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CORTEXA55-NEXT: [[INC:%.*]] = add nuw i64 [[I_06]], 1
+; CORTEXA55-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC]]
+; CORTEXA55-NEXT: [[TMP16:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; CORTEXA55-NEXT: [[ADD_1:%.*]] = add i8 [[TMP16]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX_1]], align 1
+; CORTEXA55-NEXT: [[INC_1:%.*]] = add nuw i64 [[I_06]], 2
+; CORTEXA55-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_1]]
+; CORTEXA55-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; CORTEXA55-NEXT: [[ADD_2:%.*]] = add i8 [[TMP17]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_2]], ptr [[ARRAYIDX_2]], align 1
+; CORTEXA55-NEXT: [[INC_2:%.*]] = add nuw i64 [[I_06]], 3
+; CORTEXA55-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_2]]
+; CORTEXA55-NEXT: [[TMP18:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; CORTEXA55-NEXT: [[ADD_3:%.*]] = add i8 [[TMP18]], [[SPLAT_SCALAR]]
+; CORTEXA55-NEXT: store i8 [[ADD_3]], ptr [[ARRAYIDX_3]], align 1
+; CORTEXA55-NEXT: [[INC_3]] = add nuw i64 [[I_06]], 4
+; CORTEXA55-NEXT: [[EXITCOND_NOT_3:%.*]] = icmp eq i64 [[INC_3]], [[N]]
+; CORTEXA55-NEXT: br i1 [[EXITCOND_NOT_3]], label %[[EXIT_LOOPEXIT_UNR_LCSSA:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CORTEXA55: [[EXIT_LOOPEXIT_UNR_LCSSA]]:
+; CORTEXA55-NEXT: br label %[[EXIT_LOOPEXIT]]
+; CORTEXA55: [[EXIT_LOOPEXIT]]:
+; CORTEXA55-NEXT: br label %[[EXIT]]
+; CORTEXA55: [[EXIT]]:
+; CORTEXA55-NEXT: ret void
+;
+entry:
+ %min.iters.check = icmp ult i64 %n, 32
+ br i1 %min.iters.check, label %scalar.remainder, label %vector.ph
+
+vector.ph:
+ %n.vec = and i64 %n, -32
+ %broadcast.splatinsert = insertelement <16 x i8> poison, i8 %splat.scalar, i64 0
+ %broadcast.splat = shufflevector <16 x i8> %broadcast.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body:
+ %iv = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
+ %gep.p.iv = getelementptr inbounds nuw i8, ptr %p, i64 %iv
+ %gep.p.iv.16 = getelementptr inbounds nuw i8, ptr %gep.p.iv, i64 16
+ %wide.load = load <16 x i8>, ptr %gep.p.iv, align 1
+ %wide.load.2 = load <16 x i8>, ptr %gep.p.iv.16, align 1
+ %add.broadcast = add <16 x i8> %wide.load, %broadcast.splat
+ %add.broadcast.2 = add <16 x i8> %wide.load.2, %broadcast.splat
+ store <16 x i8> %add.broadcast, ptr %gep.p.iv, align 1
+ store <16 x i8> %add.broadcast.2, ptr %gep.p.iv.16, align 1
+ %iv.next = add nuw i64 %iv, 32
+ %exit.cond = icmp eq i64 %iv.next, %n.vec
+ br i1 %exit.cond, label %middle.block, label %vector.body, !llvm.loop !2
+
+middle.block:
+ %cmp.n = icmp eq i64 %n, %n.vec
+ br i1 %cmp.n, label %exit, label %scalar.remainder
+
+scalar.remainder:
+ %iv.scalar.loop = phi i64 [ %inc, %scalar.remainder ], [ %n.vec, %middle.block ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %p, i64 %iv.scalar.loop
+ %scalar.load = load i8, ptr %arrayidx, align 1
+ %add = add i8 %scalar.load, %splat.scalar
+ store i8 %add, ptr %arrayidx, align 1
+ %inc = add nuw i64 %iv.scalar.loop, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %exit, label %scalar.remainder, !llvm.loop !3
+
+exit:
+ ret void
+}
+
+!2 = distinct !{!2, !1}
+!3 = distinct !{!3, !1}
+
;.
; APPLE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
; APPLE: [[META1]] = !{!"llvm.loop.unroll.disable"}
; APPLE: [[LOOP2]] = distinct !{[[LOOP2]], [[META3:![0-9]+]]}
; APPLE: [[META3]] = !{!"llvm.loop.isvectorized", i32 1}
+; APPLE: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]]}
+; APPLE: [[LOOP5]] = distinct !{[[LOOP5]], [[META3]]}
;.
; CORTEXA55: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
; CORTEXA55: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CORTEXA55: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CORTEXA55: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll
new file mode 100644
index 0000000..811d055
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/RISCV/vector.ll
@@ -0,0 +1,611 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -p loop-unroll -mtriple=riscv64 -mattr=+v,+f -S %s | FileCheck %s --check-prefixes=COMMON,CHECK
+; RUN: opt -p loop-unroll -mtriple=riscv64 -mcpu=sifive-s76 -S %s | FileCheck %s --check-prefixes=COMMON,SIFIVE
+
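+; The plain rv64 run line keeps this loop rolled, while sifive-s76
+; runtime-unrolls the body by 8 and emits an epilogue for the remainder.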
+define void @reverse(ptr %dst, ptr %src, i64 %len) {
+; CHECK-LABEL: define void @reverse(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = sub nsw i64 [[LEN]], [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[ARRAYIDX2]], align 16
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[LEN]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @reverse(
+; SIFIVE-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], i64 [[LEN:%.*]]) #[[ATTR0:[0-9]+]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[TMP2:%.*]] = add i64 [[LEN]], -1
+; SIFIVE-NEXT: [[XTRAITER:%.*]] = and i64 [[LEN]], 7
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT_UNR_LCSSA:.*]], label %[[ENTRY_NEW:.*]]
+; SIFIVE: [[ENTRY_NEW]]:
+; SIFIVE-NEXT: [[UNROLL_ITER:%.*]] = sub i64 [[LEN]], [[XTRAITER]]
+; SIFIVE-NEXT: br label %[[FOR_BODY:.*]]
+; SIFIVE: [[FOR_BODY]]:
+; SIFIVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[IV_NEXT_7:%.*]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: [[NITER:%.*]] = phi i64 [ 0, %[[ENTRY_NEW]] ], [ [[NITER_NEXT_7:%.*]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = sub nsw i64 [[LEN]], [[IV]]
+; SIFIVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP0]]
+; SIFIVE-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV]]
+; SIFIVE-NEXT: store <4 x float> [[TMP1]], ptr [[ARRAYIDX2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT:%.*]] = add nuw nsw i64 [[IV]], 1
+; SIFIVE-NEXT: [[TMP4:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT]]
+; SIFIVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP4]]
+; SIFIVE-NEXT: [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX_1]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT]]
+; SIFIVE-NEXT: store <4 x float> [[TMP5]], ptr [[ARRAYIDX2_1]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_1:%.*]] = add nuw nsw i64 [[IV]], 2
+; SIFIVE-NEXT: [[TMP6:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_1]]
+; SIFIVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP6]]
+; SIFIVE-NEXT: [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX_2]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_1]]
+; SIFIVE-NEXT: store <4 x float> [[TMP7]], ptr [[ARRAYIDX2_2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_2:%.*]] = add nuw nsw i64 [[IV]], 3
+; SIFIVE-NEXT: [[TMP8:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_2]]
+; SIFIVE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP8]]
+; SIFIVE-NEXT: [[TMP9:%.*]] = load <4 x float>, ptr [[ARRAYIDX_3]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_2]]
+; SIFIVE-NEXT: store <4 x float> [[TMP9]], ptr [[ARRAYIDX2_3]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_3:%.*]] = add nuw nsw i64 [[IV]], 4
+; SIFIVE-NEXT: [[TMP10:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_3]]
+; SIFIVE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP10]]
+; SIFIVE-NEXT: [[TMP11:%.*]] = load <4 x float>, ptr [[ARRAYIDX_4]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_4:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_3]]
+; SIFIVE-NEXT: store <4 x float> [[TMP11]], ptr [[ARRAYIDX2_4]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_4:%.*]] = add nuw nsw i64 [[IV]], 5
+; SIFIVE-NEXT: [[TMP12:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_4]]
+; SIFIVE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP12]]
+; SIFIVE-NEXT: [[TMP13:%.*]] = load <4 x float>, ptr [[ARRAYIDX_5]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_4]]
+; SIFIVE-NEXT: store <4 x float> [[TMP13]], ptr [[ARRAYIDX2_5]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_5:%.*]] = add nuw nsw i64 [[IV]], 6
+; SIFIVE-NEXT: [[TMP14:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_5]]
+; SIFIVE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP14]]
+; SIFIVE-NEXT: [[TMP15:%.*]] = load <4 x float>, ptr [[ARRAYIDX_6]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_6:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_5]]
+; SIFIVE-NEXT: store <4 x float> [[TMP15]], ptr [[ARRAYIDX2_6]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_6:%.*]] = add nuw nsw i64 [[IV]], 7
+; SIFIVE-NEXT: [[TMP16:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_6]]
+; SIFIVE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP16]]
+; SIFIVE-NEXT: [[TMP17:%.*]] = load <4 x float>, ptr [[ARRAYIDX_7]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_7:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_6]]
+; SIFIVE-NEXT: store <4 x float> [[TMP17]], ptr [[ARRAYIDX2_7]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_7]] = add nuw nsw i64 [[IV]], 8
+; SIFIVE-NEXT: [[NITER_NEXT_7]] = add i64 [[NITER]], 8
+; SIFIVE-NEXT: [[NITER_NCMP_7:%.*]] = icmp eq i64 [[NITER_NEXT_7]], [[UNROLL_ITER]]
+; SIFIVE-NEXT: br i1 [[NITER_NCMP_7]], label %[[EXIT_UNR_LCSSA_LOOPEXIT:.*]], label %[[FOR_BODY]]
+; SIFIVE: [[EXIT_UNR_LCSSA_LOOPEXIT]]:
+; SIFIVE-NEXT: [[IV_UNR_PH:%.*]] = phi i64 [ [[IV_NEXT_7]], %[[FOR_BODY]] ]
+; SIFIVE-NEXT: br label %[[EXIT_UNR_LCSSA]]
+; SIFIVE: [[EXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: [[IV_UNR:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_UNR_PH]], %[[EXIT_UNR_LCSSA_LOOPEXIT]] ]
+; SIFIVE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[FOR_BODY_EPIL_PREHEADER:.*]], label %[[EXIT:.*]]
+; SIFIVE: [[FOR_BODY_EPIL_PREHEADER]]:
+; SIFIVE-NEXT: br label %[[FOR_BODY_EPIL:.*]]
+; SIFIVE: [[FOR_BODY_EPIL]]:
+; SIFIVE-NEXT: [[TMP18:%.*]] = sub nsw i64 [[LEN]], [[IV_UNR]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP18]]
+; SIFIVE-NEXT: [[TMP19:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_UNR]]
+; SIFIVE-NEXT: store <4 x float> [[TMP19]], ptr [[ARRAYIDX2_EPIL]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL:%.*]] = add nuw nsw i64 [[IV_UNR]], 1
+; SIFIVE-NEXT: [[EPIL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP]], label %[[FOR_BODY_EPIL_1:.*]], label %[[EXIT_EPILOG_LCSSA:.*]]
+; SIFIVE: [[FOR_BODY_EPIL_1]]:
+; SIFIVE-NEXT: [[TMP20:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_1:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP20]]
+; SIFIVE-NEXT: [[TMP21:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_1]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_1:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL]]
+; SIFIVE-NEXT: store <4 x float> [[TMP21]], ptr [[ARRAYIDX2_EPIL_1]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_1:%.*]] = add nuw nsw i64 [[IV_UNR]], 2
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_1]], label %[[FOR_BODY_EPIL_2:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_2]]:
+; SIFIVE-NEXT: [[TMP22:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_1]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_2:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP22]]
+; SIFIVE-NEXT: [[TMP23:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_2]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_2:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_1]]
+; SIFIVE-NEXT: store <4 x float> [[TMP23]], ptr [[ARRAYIDX2_EPIL_2]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_2:%.*]] = add nuw nsw i64 [[IV_UNR]], 3
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_2:%.*]] = icmp ne i64 3, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_2]], label %[[FOR_BODY_EPIL_3:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_3]]:
+; SIFIVE-NEXT: [[TMP24:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_2]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_3:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP24]]
+; SIFIVE-NEXT: [[TMP25:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_3]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_3:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_2]]
+; SIFIVE-NEXT: store <4 x float> [[TMP25]], ptr [[ARRAYIDX2_EPIL_3]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_3:%.*]] = add nuw nsw i64 [[IV_UNR]], 4
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_3:%.*]] = icmp ne i64 4, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_3]], label %[[FOR_BODY_EPIL_4:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_4]]:
+; SIFIVE-NEXT: [[TMP26:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_3]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_4:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP26]]
+; SIFIVE-NEXT: [[TMP27:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_4]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_4:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_3]]
+; SIFIVE-NEXT: store <4 x float> [[TMP27]], ptr [[ARRAYIDX2_EPIL_4]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_4:%.*]] = add nuw nsw i64 [[IV_UNR]], 5
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_4:%.*]] = icmp ne i64 5, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_4]], label %[[FOR_BODY_EPIL_5:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_5]]:
+; SIFIVE-NEXT: [[TMP28:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_4]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_5:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP28]]
+; SIFIVE-NEXT: [[TMP29:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_5]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_5:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_4]]
+; SIFIVE-NEXT: store <4 x float> [[TMP29]], ptr [[ARRAYIDX2_EPIL_5]], align 16
+; SIFIVE-NEXT: [[IV_NEXT_EPIL_5:%.*]] = add nuw nsw i64 [[IV_UNR]], 6
+; SIFIVE-NEXT: [[EPIL_ITER_CMP_5:%.*]] = icmp ne i64 6, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[EPIL_ITER_CMP_5]], label %[[FOR_BODY_EPIL_6:.*]], label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[FOR_BODY_EPIL_6]]:
+; SIFIVE-NEXT: [[TMP30:%.*]] = sub nsw i64 [[LEN]], [[IV_NEXT_EPIL_5]]
+; SIFIVE-NEXT: [[ARRAYIDX_EPIL_6:%.*]] = getelementptr inbounds <4 x float>, ptr [[SRC]], i64 [[TMP30]]
+; SIFIVE-NEXT: [[TMP31:%.*]] = load <4 x float>, ptr [[ARRAYIDX_EPIL_6]], align 16
+; SIFIVE-NEXT: [[ARRAYIDX2_EPIL_6:%.*]] = getelementptr inbounds nuw <4 x float>, ptr [[DST]], i64 [[IV_NEXT_EPIL_5]]
+; SIFIVE-NEXT: store <4 x float> [[TMP31]], ptr [[ARRAYIDX2_EPIL_6]], align 16
+; SIFIVE-NEXT: br label %[[EXIT_EPILOG_LCSSA]]
+; SIFIVE: [[EXIT_EPILOG_LCSSA]]:
+; SIFIVE-NEXT: br label %[[EXIT]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %0 = sub nsw i64 %len, %iv
+ %arrayidx = getelementptr inbounds <4 x float>, ptr %src, i64 %0
+ %1 = load <4 x float>, ptr %arrayidx, align 16
+ %arrayidx2 = getelementptr inbounds nuw <4 x float>, ptr %dst, i64 %iv
+ store <4 x float> %1, ptr %arrayidx2, align 16
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %len
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:                                             ; preds = %for.body
+ ret void
+}
+
+
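+; The trip count is a compile-time constant 8 (two <4 x float> iterations), so
+; the loop is fully unrolled on every configuration and the backedge vanishes.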
+define void @saxpy_tripcount8_full_unroll(ptr %dst, ptr %src, float %a) {
+; COMMON-LABEL: define void @saxpy_tripcount8_full_unroll(
+; COMMON-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; COMMON-NEXT: [[ENTRY:.*:]]
+; COMMON-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; COMMON-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMMON: [[VECTOR_BODY]]:
+; COMMON-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[SRC]], align 4
+; COMMON-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[DST]], align 4
+; COMMON-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; COMMON-NEXT: store <4 x float> [[TMP0]], ptr [[DST]], align 4
+; COMMON-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 4
+; COMMON-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; COMMON-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 4
+; COMMON-NEXT: [[WIDE_LOAD12_1:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; COMMON-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_1]], <4 x float> [[WIDE_LOAD12_1]])
+; COMMON-NEXT: store <4 x float> [[TMP3]], ptr [[TMP2]], align 4
+; COMMON-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 8
+ br i1 %3, label %exit, label %vector.body
+
+exit: ; preds = %vector.body
+ ret void
+}
+
+
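+; 1024 iterations with no vectorizer metadata: generic rv64 leaves the loop
+; untouched, while sifive-s76 runtime-unrolls it by 4.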
+define void @saxpy_tripcount1K_av0(ptr %dst, ptr %src, float %a) {
+; CHECK-LABEL: define void @saxpy_tripcount1K_av0(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @saxpy_tripcount1K_av0(
+; SIFIVE-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; SIFIVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; SIFIVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; SIFIVE: [[VECTOR_BODY]]:
+; SIFIVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; SIFIVE-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; SIFIVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; SIFIVE-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; SIFIVE-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; SIFIVE-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT1:%.*]] = add nuw nsw i64 [[INDEX]], 4
+; SIFIVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT1]]
+; SIFIVE-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x float>, ptr [[TMP12]], align 4
+; SIFIVE-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT1]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_1:%.*]] = load <4 x float>, ptr [[TMP4]], align 4
+; SIFIVE-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_1]], <4 x float> [[WIDE_LOAD12_1]])
+; SIFIVE-NEXT: store <4 x float> [[TMP5]], ptr [[TMP4]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT_1:%.*]] = add nuw nsw i64 [[INDEX]], 8
+; SIFIVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT_1]]
+; SIFIVE-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x float>, ptr [[TMP6]], align 4
+; SIFIVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT_1]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_2:%.*]] = load <4 x float>, ptr [[TMP7]], align 4
+; SIFIVE-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_2]], <4 x float> [[WIDE_LOAD12_2]])
+; SIFIVE-NEXT: store <4 x float> [[TMP8]], ptr [[TMP7]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT_2:%.*]] = add nuw nsw i64 [[INDEX]], 12
+; SIFIVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX_NEXT_2]]
+; SIFIVE-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x float>, ptr [[TMP9]], align 4
+; SIFIVE-NEXT: [[TMP10:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX_NEXT_2]]
+; SIFIVE-NEXT: [[WIDE_LOAD12_3:%.*]] = load <4 x float>, ptr [[TMP10]], align 4
+; SIFIVE-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD_3]], <4 x float> [[WIDE_LOAD12_3]])
+; SIFIVE-NEXT: store <4 x float> [[TMP11]], ptr [[TMP10]], align 4
+; SIFIVE-NEXT: [[INDEX_NEXT]] = add nuw nsw i64 [[INDEX]], 16
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 1024
+ br i1 %3, label %exit, label %vector.body
+
+exit: ; preds = %vector.body
+ ret void
+}
+
+
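+; The same loop tagged with llvm.loop.isvectorized is not unrolled on either
+; configuration, so a single COMMON check suffices.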
+define void @saxpy_tripcount1K_av1(ptr %dst, ptr %src, float %a) {
+; COMMON-LABEL: define void @saxpy_tripcount1K_av1(
+; COMMON-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]], float [[A:%.*]]) #[[ATTR0]] {
+; COMMON-NEXT: [[ENTRY:.*]]:
+; COMMON-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[A]], i64 0
+; COMMON-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
+; COMMON: [[VECTOR_BODY]]:
+; COMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; COMMON-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[SRC]], i64 [[INDEX]]
+; COMMON-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
+; COMMON-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; COMMON-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
+; COMMON-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD12]])
+; COMMON-NEXT: store <4 x float> [[TMP2]], ptr [[TMP1]], align 4
+; COMMON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; COMMON-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; COMMON-NEXT: br i1 [[TMP3]], label %[[EXIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; COMMON: [[EXIT]]:
+; COMMON-NEXT: ret void
+;
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %a, i64 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds nuw float, ptr %src, i64 %index
+ %wide.load = load <4 x float>, ptr %0, align 4
+ %1 = getelementptr inbounds nuw float, ptr %dst, i64 %index
+ %wide.load12 = load <4 x float>, ptr %1, align 4
+ %2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %broadcast.splat, <4 x float> %wide.load, <4 x float> %wide.load12)
+ store <4 x float> %2, ptr %1, align 4
+ %index.next = add nuw i64 %index, 4
+ %3 = icmp eq i64 %index.next, 1024
+ br i1 %3, label %exit, label %vector.body, !llvm.loop !0
+
+exit: ; preds = %vector.body
+ ret void
+}
+!0 = !{!0, !1}
+!1 = !{!"llvm.loop.isvectorized", i32 1}
+
+; On SiFive we should runtime unroll the scalar epilogue loop, but not the
+; vector loop.
+define void @scalar_epilogue(ptr %p, i8 %splat.scalar, i64 %n) {
+; CHECK-LABEL: define void @scalar_epilogue(
+; CHECK-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[GEP_P_IV:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_P_IV_16:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_P_IV]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[GEP_P_IV]], align 1
+; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <16 x i8>, ptr [[GEP_P_IV_16]], align 1
+; CHECK-NEXT: [[ADD_BROADCAST:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[ADD_BROADCAST_2:%.*]] = add <16 x i8> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <16 x i8> [[ADD_BROADCAST]], ptr [[GEP_P_IV]], align 1
+; CHECK-NEXT: store <16 x i8> [[ADD_BROADCAST_2]], ptr [[GEP_P_IV_16]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 32
+; CHECK-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[EXIT_COND]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; CHECK: [[SCALAR_REMAINDER_PREHEADER]]:
+; CHECK-NEXT: [[IV_SCALAR_LOOP_PH:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; CHECK: [[SCALAR_REMAINDER]]:
+; CHECK-NEXT: [[IV_SCALAR_LOOP:%.*]] = phi i64 [ [[INC:%.*]], %[[SCALAR_REMAINDER]] ], [ [[IV_SCALAR_LOOP_PH]], %[[SCALAR_REMAINDER_PREHEADER]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP]]
+; CHECK-NEXT: [[SCALAR_LOAD:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SCALAR_LOAD]], [[SPLAT_SCALAR]]
+; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[INC]] = add nuw i64 [[IV_SCALAR_LOOP]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+; SIFIVE-LABEL: define void @scalar_epilogue(
+; SIFIVE-SAME: ptr [[P:%.*]], i8 [[SPLAT_SCALAR:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; SIFIVE-NEXT: [[ENTRY:.*]]:
+; SIFIVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; SIFIVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_REMAINDER_PREHEADER:.*]], label %[[VECTOR_PH:.*]]
+; SIFIVE: [[VECTOR_PH]]:
+; SIFIVE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -32
+; SIFIVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[SPLAT_SCALAR]], i64 0
+; SIFIVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
+; SIFIVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; SIFIVE: [[VECTOR_BODY]]:
+; SIFIVE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; SIFIVE-NEXT: [[GEP_P_IV:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV]]
+; SIFIVE-NEXT: [[GEP_P_IV_16:%.*]] = getelementptr inbounds nuw i8, ptr [[GEP_P_IV]], i64 16
+; SIFIVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[GEP_P_IV]], align 1
+; SIFIVE-NEXT: [[WIDE_LOAD_2:%.*]] = load <16 x i8>, ptr [[GEP_P_IV_16]], align 1
+; SIFIVE-NEXT: [[ADD_BROADCAST:%.*]] = add <16 x i8> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; SIFIVE-NEXT: [[ADD_BROADCAST_2:%.*]] = add <16 x i8> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]]
+; SIFIVE-NEXT: store <16 x i8> [[ADD_BROADCAST]], ptr [[GEP_P_IV]], align 1
+; SIFIVE-NEXT: store <16 x i8> [[ADD_BROADCAST_2]], ptr [[GEP_P_IV_16]], align 1
+; SIFIVE-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 32
+; SIFIVE-NEXT: [[EXIT_COND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; SIFIVE-NEXT: br i1 [[EXIT_COND]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; SIFIVE: [[MIDDLE_BLOCK]]:
+; SIFIVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; SIFIVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER]]
+; SIFIVE: [[SCALAR_REMAINDER_PREHEADER]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[N_VEC]], %[[MIDDLE_BLOCK]] ]
+; SIFIVE-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[TMP1:%.*]] = add i64 [[N]], -1
+; SIFIVE-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[XTRAITER:%.*]] = and i64 [[TMP0]], 7
+; SIFIVE-NEXT: [[LCMP_MOD:%.*]] = icmp ne i64 [[XTRAITER]], 0
+; SIFIVE-NEXT: br i1 [[LCMP_MOD]], label %[[SCALAR_REMAINDER_PROL_PREHEADER:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_PREHEADER]]:
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL]]:
+; SIFIVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP]]
+; SIFIVE-NEXT: [[SCALAR_LOAD:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; SIFIVE-NEXT: [[ADD:%.*]] = add i8 [[SCALAR_LOAD]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX]], align 1
+; SIFIVE-NEXT: [[INC:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 1
+; SIFIVE-NEXT: [[PROL_ITER_CMP:%.*]] = icmp ne i64 1, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP]], label %[[SCALAR_REMAINDER_PROL_1:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_1]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_1:%.*]] = load i8, ptr [[ARRAYIDX_PROL_1]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_1:%.*]] = add i8 [[SCALAR_LOAD_PROL_1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_1]], ptr [[ARRAYIDX_PROL_1]], align 1
+; SIFIVE-NEXT: [[INC_PROL_1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 2
+; SIFIVE-NEXT: [[PROL_ITER_CMP_1:%.*]] = icmp ne i64 2, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_1]], label %[[SCALAR_REMAINDER_PROL_2:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_2]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_2:%.*]] = load i8, ptr [[ARRAYIDX_PROL_2]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_2:%.*]] = add i8 [[SCALAR_LOAD_PROL_2]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_2]], ptr [[ARRAYIDX_PROL_2]], align 1
+; SIFIVE-NEXT: [[INC_PROL_2:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 3
+; SIFIVE-NEXT: [[PROL_ITER_CMP_2:%.*]] = icmp ne i64 3, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_2]], label %[[SCALAR_REMAINDER_PROL_3:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_3]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_2]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_3:%.*]] = load i8, ptr [[ARRAYIDX_PROL_3]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_3:%.*]] = add i8 [[SCALAR_LOAD_PROL_3]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_3]], ptr [[ARRAYIDX_PROL_3]], align 1
+; SIFIVE-NEXT: [[INC_PROL_3:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 4
+; SIFIVE-NEXT: [[PROL_ITER_CMP_3:%.*]] = icmp ne i64 4, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_3]], label %[[SCALAR_REMAINDER_PROL_4:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_4]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_4:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_3]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_4:%.*]] = load i8, ptr [[ARRAYIDX_PROL_4]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_4:%.*]] = add i8 [[SCALAR_LOAD_PROL_4]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_4]], ptr [[ARRAYIDX_PROL_4]], align 1
+; SIFIVE-NEXT: [[INC_PROL_4:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 5
+; SIFIVE-NEXT: [[PROL_ITER_CMP_4:%.*]] = icmp ne i64 5, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_4]], label %[[SCALAR_REMAINDER_PROL_5:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_5]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_5:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_4]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_5:%.*]] = load i8, ptr [[ARRAYIDX_PROL_5]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_5:%.*]] = add i8 [[SCALAR_LOAD_PROL_5]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_5]], ptr [[ARRAYIDX_PROL_5]], align 1
+; SIFIVE-NEXT: [[INC_PROL_5:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 6
+; SIFIVE-NEXT: [[PROL_ITER_CMP_5:%.*]] = icmp ne i64 6, [[XTRAITER]]
+; SIFIVE-NEXT: br i1 [[PROL_ITER_CMP_5]], label %[[SCALAR_REMAINDER_PROL_6:.*]], label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_6]]:
+; SIFIVE-NEXT: [[ARRAYIDX_PROL_6:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_PROL_5]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_PROL_6:%.*]] = load i8, ptr [[ARRAYIDX_PROL_6]], align 1
+; SIFIVE-NEXT: [[ADD_PROL_6:%.*]] = add i8 [[SCALAR_LOAD_PROL_6]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_PROL_6]], ptr [[ARRAYIDX_PROL_6]], align 1
+; SIFIVE-NEXT: [[INC_PROL_6:%.*]] = add nuw i64 [[IV_SCALAR_LOOP]], 7
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP_UNR_PH:%.*]] = phi i64 [ [[INC]], %[[SCALAR_REMAINDER_PROL]] ], [ [[INC_PROL_1]], %[[SCALAR_REMAINDER_PROL_1]] ], [ [[INC_PROL_2]], %[[SCALAR_REMAINDER_PROL_2]] ], [ [[INC_PROL_3]], %[[SCALAR_REMAINDER_PROL_3]] ], [ [[INC_PROL_4]], %[[SCALAR_REMAINDER_PROL_4]] ], [ [[INC_PROL_5]], %[[SCALAR_REMAINDER_PROL_5]] ], [ [[INC_PROL_6]], %[[SCALAR_REMAINDER_PROL_6]] ]
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER_PROL_LOOPEXIT]]
+; SIFIVE: [[SCALAR_REMAINDER_PROL_LOOPEXIT]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP_UNR:%.*]] = phi i64 [ [[IV_SCALAR_LOOP]], %[[SCALAR_REMAINDER_PREHEADER]] ], [ [[IV_SCALAR_LOOP_UNR_PH]], %[[SCALAR_REMAINDER_PROL_LOOPEXIT_UNR_LCSSA]] ]
+; SIFIVE-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP2]], 7
+; SIFIVE-NEXT: br i1 [[TMP3]], label %[[EXIT_LOOPEXIT:.*]], label %[[SCALAR_REMAINDER_PREHEADER_NEW:.*]]
+; SIFIVE: [[SCALAR_REMAINDER_PREHEADER_NEW]]:
+; SIFIVE-NEXT: br label %[[SCALAR_REMAINDER:.*]]
+; SIFIVE: [[SCALAR_REMAINDER]]:
+; SIFIVE-NEXT: [[IV_SCALAR_LOOP1:%.*]] = phi i64 [ [[IV_SCALAR_LOOP_UNR]], %[[SCALAR_REMAINDER_PREHEADER_NEW]] ], [ [[INC_7:%.*]], %[[SCALAR_REMAINDER]] ]
+; SIFIVE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[IV_SCALAR_LOOP1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; SIFIVE-NEXT: [[ADD1:%.*]] = add i8 [[SCALAR_LOAD1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD1]], ptr [[ARRAYIDX1]], align 1
+; SIFIVE-NEXT: [[INC1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 1
+; SIFIVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_1:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
+; SIFIVE-NEXT: [[ADD_1:%.*]] = add i8 [[SCALAR_LOAD_1]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_1]], ptr [[ARRAYIDX_1]], align 1
+; SIFIVE-NEXT: [[INC_1:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 2
+; SIFIVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_1]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_2:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
+; SIFIVE-NEXT: [[ADD_2:%.*]] = add i8 [[SCALAR_LOAD_2]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_2]], ptr [[ARRAYIDX_2]], align 1
+; SIFIVE-NEXT: [[INC_2:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 3
+; SIFIVE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_2]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_3:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
+; SIFIVE-NEXT: [[ADD_3:%.*]] = add i8 [[SCALAR_LOAD_3]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_3]], ptr [[ARRAYIDX_3]], align 1
+; SIFIVE-NEXT: [[INC_3:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 4
+; SIFIVE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_3]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_4:%.*]] = load i8, ptr [[ARRAYIDX_4]], align 1
+; SIFIVE-NEXT: [[ADD_4:%.*]] = add i8 [[SCALAR_LOAD_4]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_4]], ptr [[ARRAYIDX_4]], align 1
+; SIFIVE-NEXT: [[INC_4:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 5
+; SIFIVE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_4]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_5:%.*]] = load i8, ptr [[ARRAYIDX_5]], align 1
+; SIFIVE-NEXT: [[ADD_5:%.*]] = add i8 [[SCALAR_LOAD_5]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_5]], ptr [[ARRAYIDX_5]], align 1
+; SIFIVE-NEXT: [[INC_5:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 6
+; SIFIVE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_5]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_6:%.*]] = load i8, ptr [[ARRAYIDX_6]], align 1
+; SIFIVE-NEXT: [[ADD_6:%.*]] = add i8 [[SCALAR_LOAD_6]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_6]], ptr [[ARRAYIDX_6]], align 1
+; SIFIVE-NEXT: [[INC_6:%.*]] = add nuw i64 [[IV_SCALAR_LOOP1]], 7
+; SIFIVE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw i8, ptr [[P]], i64 [[INC_6]]
+; SIFIVE-NEXT: [[SCALAR_LOAD_7:%.*]] = load i8, ptr [[ARRAYIDX_7]], align 1
+; SIFIVE-NEXT: [[ADD_7:%.*]] = add i8 [[SCALAR_LOAD_7]], [[SPLAT_SCALAR]]
+; SIFIVE-NEXT: store i8 [[ADD_7]], ptr [[ARRAYIDX_7]], align 1
+; SIFIVE-NEXT: [[INC_7]] = add nuw i64 [[IV_SCALAR_LOOP1]], 8
+; SIFIVE-NEXT: [[EXITCOND_NOT_7:%.*]] = icmp eq i64 [[INC_7]], [[N]]
+; SIFIVE-NEXT: br i1 [[EXITCOND_NOT_7]], label %[[EXIT_LOOPEXIT_UNR_LCSSA:.*]], label %[[SCALAR_REMAINDER]], !llvm.loop [[LOOP3:![0-9]+]]
+; SIFIVE: [[EXIT_LOOPEXIT_UNR_LCSSA]]:
+; SIFIVE-NEXT: br label %[[EXIT_LOOPEXIT]]
+; SIFIVE: [[EXIT_LOOPEXIT]]:
+; SIFIVE-NEXT: br label %[[EXIT]]
+; SIFIVE: [[EXIT]]:
+; SIFIVE-NEXT: ret void
+;
+entry:
+ %min.iters.check = icmp ult i64 %n, 32
+ br i1 %min.iters.check, label %scalar.remainder, label %vector.ph
+
+vector.ph:
+ %n.vec = and i64 %n, -32
+ %broadcast.splatinsert = insertelement <16 x i8> poison, i8 %splat.scalar, i64 0
+ %broadcast.splat = shufflevector <16 x i8> %broadcast.splatinsert, <16 x i8> poison, <16 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body:
+ %iv = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
+ %gep.p.iv = getelementptr inbounds nuw i8, ptr %p, i64 %iv
+ %gep.p.iv.16 = getelementptr inbounds nuw i8, ptr %gep.p.iv, i64 16
+ %wide.load = load <16 x i8>, ptr %gep.p.iv, align 1
+ %wide.load.2 = load <16 x i8>, ptr %gep.p.iv.16, align 1
+ %add.broadcast = add <16 x i8> %wide.load, %broadcast.splat
+ %add.broadcast.2 = add <16 x i8> %wide.load.2, %broadcast.splat
+ store <16 x i8> %add.broadcast, ptr %gep.p.iv, align 1
+ store <16 x i8> %add.broadcast.2, ptr %gep.p.iv.16, align 1
+ %iv.next = add nuw i64 %iv, 32
+ %exit.cond = icmp eq i64 %iv.next, %n.vec
+ br i1 %exit.cond, label %middle.block, label %vector.body, !llvm.loop !2
+
+middle.block:
+ %cmp.n = icmp eq i64 %n, %n.vec
+ br i1 %cmp.n, label %exit, label %scalar.remainder
+
+scalar.remainder:
+ %iv.scalar.loop = phi i64 [ %inc, %scalar.remainder ], [ %n.vec, %middle.block ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %p, i64 %iv.scalar.loop
+ %scalar.load = load i8, ptr %arrayidx, align 1
+ %add = add i8 %scalar.load, %splat.scalar
+ store i8 %add, ptr %arrayidx, align 1
+ %inc = add nuw i64 %iv.scalar.loop, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %exit, label %scalar.remainder, !llvm.loop !3
+
+exit:
+ ret void
+}
+
+!2 = distinct !{!2, !1}
+!3 = distinct !{!3, !1}
+
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
+; SIFIVE: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; SIFIVE: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; SIFIVE: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; SIFIVE: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
index 011b823..a7ec749 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-reduction-inloop-cond.ll
@@ -27,7 +27,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP10]], i32 4, <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> poison)
; CHECK-NEXT: [[TMP12:%.*]] = select fast <vscale x 4 x i1> [[TMP9]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> zeroinitializer
; CHECK-NEXT: [[TMP13:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP12]])
-; CHECK-NEXT: [[TMP14]] = fadd fast float [[TMP13]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP14]] = fadd fast float [[VEC_PHI]], [[TMP13]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
index 3d81541..e555785 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-epilog-vect-inloop-reductions.ll
@@ -33,9 +33,9 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP10]], align 8
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i64>, ptr [[TMP15]], align 8
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP17]] = and i64 [[TMP16]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP17]] = and i64 [[VEC_PHI]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> [[WIDE_LOAD3]])
-; CHECK-NEXT: [[TMP19]] = and i64 [[TMP18]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP19]] = and i64 [[VEC_PHI2]], [[TMP18]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP21]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -59,7 +59,7 @@ define i64 @int_reduction_and(ptr noalias nocapture %a, i64 %N) {
; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX7]]
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x i64>, ptr [[TMP24]], align 8
; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> [[WIDE_LOAD9]])
-; CHECK-NEXT: [[TMP27]] = and i64 [[TMP26]], [[VEC_PHI8]]
+; CHECK-NEXT: [[TMP27]] = and i64 [[VEC_PHI8]], [[TMP26]]
; CHECK-NEXT: [[INDEX_NEXT10]] = add nuw i64 [[INDEX7]], 2
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT10]], [[N_VEC5]]
; CHECK-NEXT: br i1 [[TMP28]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index fc86e3a..f4982e6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -88,7 +88,7 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP11]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
; CHECK-IN-LOOP-NEXT: [[TMP13:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i32> zeroinitializer
; CHECK-IN-LOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP13]])
-; CHECK-IN-LOOP-NEXT: [[TMP15]] = add i32 [[TMP14]], [[VEC_PHI]]
+; CHECK-IN-LOOP-NEXT: [[TMP15]] = add i32 [[VEC_PHI]], [[TMP14]]
; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP17]]
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
@@ -349,7 +349,7 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-IN-LOOP-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP14]], i32 4, <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> poison)
; CHECK-IN-LOOP-NEXT: [[TMP17:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD1]], <vscale x 4 x i32> zeroinitializer
; CHECK-IN-LOOP-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP17]])
-; CHECK-IN-LOOP-NEXT: [[TMP19]] = xor i32 [[TMP18]], [[VEC_PHI]]
+; CHECK-IN-LOOP-NEXT: [[TMP19]] = xor i32 [[VEC_PHI]], [[TMP18]]
; CHECK-IN-LOOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP21]]
; CHECK-IN-LOOP-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-IN-LOOP-NEXT: [[TMP22:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
index bc02595..9f2c70e 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-predselect.ll
@@ -17,7 +17,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -66,11 +66,11 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[VEC_IND]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD1]])
-; CHECK-NEXT: [[TMP8]] = add i32 [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[TMP8]] = add i32 [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], 260
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
index f1bee3b..83cb325 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
@@ -29,7 +29,7 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea
; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP6]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP7]])
-; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -109,7 +109,7 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read
; CHECK-NEXT: [[TMP7:%.*]] = mul nsw <16 x i32> [[TMP6]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP7]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
-; CHECK-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -184,7 +184,7 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP1]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> poison)
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
index e27b028..27e87bc 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions-interleave.ll
@@ -23,10 +23,10 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = sext <4 x i32> [[WIDE_LOAD2]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP9]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -96,10 +96,10 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP14:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD4]], [[WIDE_LOAD2]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[TMP9:%.*]] = sext <4 x i32> [[TMP14]] to <4 x i64>
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP9]])
-; CHECK-NEXT: [[TMP11]] = add i64 [[TMP10]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP11]] = add i64 [[VEC_PHI1]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index ddd334d..658b9a4 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -62,7 +62,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -127,7 +127,7 @@ define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP0]], align 2
; CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -192,7 +192,7 @@ define i64 @add_i8_i64(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP1]])
-; CHECK-NEXT: [[TMP3]] = add i64 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = add i64 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -254,7 +254,7 @@ define i32 @add_i32_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP0]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -300,7 +300,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_MASKED_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP1]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
@@ -347,7 +347,7 @@ define i32 @add_i8_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP1]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
@@ -392,7 +392,7 @@ define signext i16 @add_i16_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <8 x i16> @llvm.masked.load.v8i16.p0(ptr [[TMP0]], i32 2, <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i16 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i16 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
@@ -438,7 +438,7 @@ define signext i16 @add_i8_i16(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP1]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i16 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i16 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
@@ -483,7 +483,7 @@ define zeroext i8 @add_i8_i8(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0(ptr [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> zeroinitializer)
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[WIDE_MASKED_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i8 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i8 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
@@ -577,7 +577,7 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = sext <4 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
@@ -652,7 +652,7 @@ define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP3:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <8 x i64> [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
@@ -731,7 +731,7 @@ define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw <8 x i64> [[TMP4]], [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
@@ -807,7 +807,7 @@ define i32 @mla_i32_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP2]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -860,7 +860,7 @@ define i32 @mla_i16_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i3
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
@@ -915,7 +915,7 @@ define i32 @mla_i8_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <16 x i32> [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP4]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
@@ -968,7 +968,7 @@ define signext i16 @mla_i16_i16(ptr nocapture readonly %x, ptr nocapture readonl
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i16> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i16> [[TMP2]], <8 x i16> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i16 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i16 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
@@ -1021,7 +1021,7 @@ define signext i16 @mla_i8_i16(ptr nocapture readonly %x, ptr nocapture readonly
; CHECK-NEXT: [[TMP4:%.*]] = mul nuw <16 x i16> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i16> [[TMP4]], <16 x i16> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i16 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i16 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
@@ -1074,7 +1074,7 @@ define zeroext i8 @mla_i8_i8(ptr nocapture readonly %x, ptr nocapture readonly %
; CHECK-NEXT: [[TMP2:%.*]] = mul <16 x i8> [[WIDE_MASKED_LOAD2]], [[WIDE_MASKED_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> [[TMP2]], <16 x i8> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i8 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i8 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
@@ -1127,7 +1127,7 @@ define i32 @red_mla_ext_s8_s16_s32(ptr noalias nocapture readonly %A, ptr noalia
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <8 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <8 x i1> [[ACTIVE_LANE_MASK]], <8 x i32> [[TMP4]], <8 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
@@ -1190,7 +1190,7 @@ define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noali
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i32> [[TMP4]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i64 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
@@ -1275,7 +1275,7 @@ define i32 @red_mla_u8_s8_u32(ptr noalias nocapture readonly %A, ptr noalias noc
; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> [[TMP4]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
@@ -1339,9 +1339,9 @@ define i32 @reduction_interleave_group(i32 %n, ptr %arr) #0 {
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC1]])
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[STRIDED_VEC]])
-; CHECK-NEXT: [[TMP9]] = add i32 [[TMP8]], [[TMP7]]
+; CHECK-NEXT: [[TMP9]] = add i32 [[TMP7]], [[TMP8]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
@@ -1412,7 +1412,7 @@ define i32 @mla_i8_i32_multiuse(ptr nocapture readonly %x, ptr nocapture readonl
; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i32> [[TMP7]], [[TMP7]]
; CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP2]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_COND_CLEANUP]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
@@ -1460,7 +1460,7 @@ define i64 @mla_xx_sext_zext(ptr nocapture noundef readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <8 x i64> [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP3]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
@@ -1528,10 +1528,10 @@ define i64 @mla_and_add_together_16_64(ptr nocapture noundef readonly %x, i32 no
; CHECK-NEXT: [[TMP4:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <8 x i64> [[TMP4]], [[TMP4]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP2]])
-; CHECK-NEXT: [[TMP5]] = add i64 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i64 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP10:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP10]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
@@ -1678,11 +1678,11 @@ define i64 @test_std_q31(ptr %x, i32 %n) #0 {
; CHECK-NEXT: [[TMP1:%.*]] = ashr <4 x i32> [[WIDE_LOAD]], splat (i32 8)
; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i64 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i64 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
; CHECK-NEXT: [[TMP6:%.*]] = mul nsw <4 x i64> [[TMP5]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]])
-; CHECK-NEXT: [[TMP8]] = add i64 [[TMP7]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP8]] = add i64 [[VEC_PHI1]], [[TMP7]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY1]], !llvm.loop [[LOOP37:![0-9]+]]
@@ -1770,12 +1770,12 @@ define i64 @test_fir_q15(ptr %x, ptr %y, i32 %n) #0 {
; CHECK-NEXT: [[TMP7:%.*]] = sext <8 x i16> [[STRIDED_VEC]] to <8 x i64>
; CHECK-NEXT: [[TMP8:%.*]] = mul nsw <8 x i64> [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP8]])
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[VEC_PHI]], [[TMP9]]
; CHECK-NEXT: [[TMP11:%.*]] = sext <8 x i16> [[STRIDED_VEC4]] to <8 x i64>
; CHECK-NEXT: [[TMP12:%.*]] = sext <8 x i16> [[STRIDED_VEC1]] to <8 x i64>
; CHECK-NEXT: [[TMP13:%.*]] = mul nsw <8 x i64> [[TMP11]], [[TMP12]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP13]])
-; CHECK-NEXT: [[TMP16]] = add i64 [[TMP15]], [[TMP10]]
+; CHECK-NEXT: [[TMP16]] = add i64 [[TMP10]], [[TMP15]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
index 21266e5..162440a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/bf16.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFBFMIN
-; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN-PREDICATED
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFBFMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfbfmin -S | FileCheck %s -check-prefix=ZVFBFMIN
define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
@@ -22,24 +22,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-ZVFBFMIN: [[EXIT]]:
; NO-ZVFBFMIN-NEXT: ret void
;
-; NO-ZVFBFMIN-PREDICATED-LABEL: define void @fadd(
-; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd bfloat [[X]], [[Y]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: store bfloat [[Z]], ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
-; NO-ZVFBFMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: ret void
-;
; ZVFBFMIN-LABEL: define void @fadd(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
@@ -152,54 +134,6 @@ define void @vfwmaccbf16.vv(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64
; NO-ZVFBFMIN: [[EXIT]]:
; NO-ZVFBFMIN-NEXT: ret void
;
-; NO-ZVFBFMIN-PREDICATED-LABEL: define void @vfwmaccbf16.vv(
-; NO-ZVFBFMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[VECTOR_PH]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[VECTOR_BODY:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[VECTOR_BODY]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP:%.*]] = getelementptr float, ptr [[C]], i64 [[I]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <4 x bfloat>, ptr [[A_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = load <4 x bfloat>, ptr [[B_GEP]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[WIDE_MASKED_LOAD4:%.*]] = load <4 x float>, ptr [[C_GEP]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP4:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD]] to <4 x float>
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP5:%.*]] = fpext <4 x bfloat> [[WIDE_MASKED_LOAD3]] to <4 x float>
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> [[TMP4]], <4 x float> [[TMP5]], <4 x float> [[WIDE_MASKED_LOAD4]])
-; NO-ZVFBFMIN-PREDICATED-NEXT: store <4 x float> [[TMP6]], ptr [[C_GEP]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[I]], 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; NO-ZVFBFMIN-PREDICATED: [[MIDDLE_BLOCK]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
-; NO-ZVFBFMIN-PREDICATED: [[SCALAR_PH]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFBFMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[A_GEP1:%.*]] = getelementptr bfloat, ptr [[A]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[B_GEP1:%.*]] = getelementptr bfloat, ptr [[B]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[C_GEP1:%.*]] = getelementptr float, ptr [[C]], i64 [[I1]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X:%.*]] = load bfloat, ptr [[A_GEP1]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y:%.*]] = load bfloat, ptr [[B_GEP1]], align 2
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Z:%.*]] = load float, ptr [[C_GEP1]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[X_EXT:%.*]] = fpext bfloat [[X]] to float
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[Y_EXT:%.*]] = fpext bfloat [[Y]] to float
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[FMULADD:%.*]] = call float @llvm.fmuladd.f32(float [[X_EXT]], float [[Y_EXT]], float [[Z]])
-; NO-ZVFBFMIN-PREDICATED-NEXT: store float [[FMULADD]], ptr [[C_GEP1]], align 4
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I1]], 1
-; NO-ZVFBFMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFBFMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
-; NO-ZVFBFMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFBFMIN-PREDICATED-NEXT: ret void
-;
; ZVFBFMIN-LABEL: define void @vfwmaccbf16.vv(
; ZVFBFMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; ZVFBFMIN-NEXT: [[ENTRY:.*]]:
@@ -274,21 +208,3 @@ loop:
exit:
ret void
}
-;.
-; NO-ZVFBFMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; NO-ZVFBFMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; NO-ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; NO-ZVFBFMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
-; NO-ZVFBFMIN-PREDICATED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; NO-ZVFBFMIN-PREDICATED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; NO-ZVFBFMIN-PREDICATED: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; NO-ZVFBFMIN-PREDICATED: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
-; ZVFBFMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; ZVFBFMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; ZVFBFMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; ZVFBFMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; ZVFBFMIN: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
-; ZVFBFMIN: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-;.
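
The blocks of checks between ';.' markers deleted above are the autogenerated global checks for the loop metadata the vectorizer attaches to each loop branch. Regenerating the test with --check-globals none (the updated NOTE lines in these files record the flag) drops them; the metadata itself is still emitted and, as the removed checks show, has this shape, sketched here with the literal numbering used above:

  !0 = distinct !{!0, !1, !2}
  !1 = !{!"llvm.loop.isvectorized", i32 1}
  !2 = !{!"llvm.loop.unroll.runtime.disable"}

Dropping these checks keeps the test focused on the loop structure rather than on metadata numbering, which shifts whenever the output changes.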
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index f654238..5f13089 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -25,7 +25,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -35,6 +35,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VEC_IND]], ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
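
The functional change in this hunk is how the active vector length (AVL) is computed: instead of being re-derived from the EVL-based induction variable every iteration, it is now carried as its own phi and decremented by the number of elements just processed. A minimal sketch of the two shapes, using placeholder block and value names:

Before (AVL recomputed each iteration):
  %avl = sub i64 %N, %evl.based.iv

After (AVL carried as a loop phi):
  %avl = phi i64 [ %N, %vector.ph ], [ %avl.next, %vector.body ]
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 2, i1 true)
  %evl.zext = zext i32 %evl to i64
  %avl.next = sub nuw i64 %avl, %evl.zext

Because get.vector.length never returns more than the AVL it was given, the decrement cannot wrap, which is why the new subtraction carries the nuw flag. The same rewrite shows up in the remaining EVL tests below.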
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
index 53e43e1..effaf57 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/f16.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s -check-prefix=NO-ZVFHMIN
-; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN-PREDICATED
+; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v -S -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue | FileCheck %s -check-prefix=NO-ZVFHMIN
; RUN: opt < %s -passes=loop-vectorize -mtriple riscv64 -mattr=+v,+zvfhmin -S | FileCheck %s -check-prefix=ZVFHMIN
define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
@@ -22,24 +22,6 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; NO-ZVFHMIN: [[EXIT]]:
; NO-ZVFHMIN-NEXT: ret void
;
-; NO-ZVFHMIN-PREDICATED-LABEL: define void @fadd(
-; NO-ZVFHMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; NO-ZVFHMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
-; NO-ZVFHMIN-PREDICATED: [[LOOP]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
-; NO-ZVFHMIN-PREDICATED-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]]
-; NO-ZVFHMIN-PREDICATED-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
-; NO-ZVFHMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
-; NO-ZVFHMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
-; NO-ZVFHMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
-; NO-ZVFHMIN-PREDICATED: [[EXIT]]:
-; NO-ZVFHMIN-PREDICATED-NEXT: ret void
-;
; ZVFHMIN-LABEL: define void @fadd(
; ZVFHMIN-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; ZVFHMIN-NEXT: [[ENTRY:.*]]:
@@ -86,6 +68,23 @@ define void @fadd(ptr noalias %a, ptr noalias %b, i64 %n) {
; ZVFHMIN: [[EXIT]]:
; ZVFHMIN-NEXT: ret void
;
+; NO-ZVFHMIN-PREDICATED-LABEL: define void @fadd(
+; NO-ZVFHMIN-PREDICATED-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; NO-ZVFHMIN-PREDICATED-NEXT: [[ENTRY:.*]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: br label %[[LOOP:.*]]
+; NO-ZVFHMIN-PREDICATED: [[LOOP]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: [[I:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[A_GEP:%.*]] = getelementptr half, ptr [[A]], i64 [[I]]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[B_GEP:%.*]] = getelementptr half, ptr [[B]], i64 [[I]]
+; NO-ZVFHMIN-PREDICATED-NEXT: [[X:%.*]] = load half, ptr [[A_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[Y:%.*]] = load half, ptr [[B_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[Z:%.*]] = fadd half [[X]], [[Y]]
+; NO-ZVFHMIN-PREDICATED-NEXT: store half [[Z]], ptr [[A_GEP]], align 2
+; NO-ZVFHMIN-PREDICATED-NEXT: [[I_NEXT]] = add i64 [[I]], 1
+; NO-ZVFHMIN-PREDICATED-NEXT: [[DONE:%.*]] = icmp eq i64 [[I_NEXT]], [[N]]
+; NO-ZVFHMIN-PREDICATED-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-ZVFHMIN-PREDICATED: [[EXIT]]:
+; NO-ZVFHMIN-PREDICATED-NEXT: ret void
entry:
br label %loop
loop:
@@ -102,9 +101,3 @@ loop:
exit:
ret void
}
-;.
-; ZVFHMIN: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; ZVFHMIN: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; ZVFHMIN: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; ZVFHMIN: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-;.
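
Two independent cleanups land in f16.ll. First, the NOTE line now records --check-globals none, which is why the trailing metadata checks are deleted at the end. Second, the predicated RUN line is pointed at the existing NO-ZVFHMIN prefix: without zvfhmin, the half-precision loop is not vectorized under either configuration, so both invocations emit the same scalar loop and one set of checks can serve both. The FileCheck pattern, sketched with -some-flag as a placeholder for the configuration difference:

  ; RUN: opt -passes=loop-vectorize %s -S | FileCheck %s -check-prefix=SAME
  ; RUN: opt -passes=loop-vectorize -some-flag %s -S | FileCheck %s -check-prefix=SAME

With a shared prefix, FileCheck verifies each command's output against the same SAME: lines, so the test now also guards against the two configurations silently diverging.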
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 8a2ff1b..6e2434a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -85,7 +85,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i16>, ptr [[TMP7]], align 2
; INLOOP-NEXT: [[TMP9:%.*]] = sext <vscale x 8 x i16> [[WIDE_LOAD]] to <vscale x 8 x i32>
; INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP9]])
-; INLOOP-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI]]
+; INLOOP-NEXT: [[TMP11]] = add i32 [[VEC_PHI]], [[TMP10]]
; INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
; INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -132,7 +132,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i32 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0(ptr align 2 [[TMP7]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
@@ -140,6 +140,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP-NEXT: [[VP_OP:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP9]]
; IF-EVL-OUTLOOP-NEXT: [[TMP10]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP5]])
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP5]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP5]]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -185,7 +186,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP5:%.*]] = phi i32 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP5]], i32 8, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0(ptr align 2 [[TMP8]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
@@ -193,6 +194,7 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = call i32 @llvm.vp.reduce.add.nxv8i32(i32 0, <vscale x 8 x i32> [[TMP14]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-INLOOP-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i32 [[TMP5]], [[TMP6]]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -350,7 +352,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -359,6 +361,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -398,7 +401,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -406,6 +409,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP13]], i32 [[VEC_PHI]])
; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-INLOOP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
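
inloop-reduction.ll exercises both reduction placements, and the AVL-as-phi rewrite applies to each. The OUTLOOP form keeps the accumulator as a vector phi, folding the lanes past the EVL back to their previous values with llvm.vp.merge and reducing once after the loop; the INLOOP form reduces to a scalar inside the loop with llvm.vp.reduce.add. A minimal sketch of the two loop bodies, with placeholder names and the intrinsic signatures taken from the checks above:

Out-of-loop (vector accumulator, reduced after the loop):
  %acc = phi <vscale x 4 x i32> [ zeroinitializer, %vector.ph ], [ %acc.next, %vector.body ]
  %sum = add <vscale x 4 x i32> %acc, %vals
  %acc.next = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %sum, <vscale x 4 x i32> %acc, i32 %evl)

In-loop (scalar accumulator, reduced every iteration):
  %acc = phi i32 [ 0, %vector.ph ], [ %acc.next, %vector.body ]
  %part = call i32 @llvm.vp.reduce.add.nxv8i32(i32 0, <vscale x 8 x i32> %vals, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  %acc.next = add i32 %acc, %part

The remaining diff in this file, add i32 [[TMP10]], [[VEC_PHI]] becoming add i32 [[VEC_PHI]], [[TMP10]], is only a commuted-operand change in the emitted IR.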
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
index 02eee7a..6f20376 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
@@ -117,7 +117,7 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL: vector.body:
; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = phi i32 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -139,6 +139,7 @@ define void @masked_strided_factor2(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP13]]
; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP12]], <vscale x 16 x ptr> align 1 [[TMP14]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP15:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1024
; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -325,7 +326,7 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL: vector.body:
; PREDICATED_DATA-WITH-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 16 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = sub i32 1024, [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL:%.*]] = phi i32 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 16, i1 true)
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP1]], i64 0
; PREDICATED_DATA-WITH-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 16 x i32> [[BROADCAST_SPLATINSERT1]], <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
@@ -363,6 +364,7 @@ define void @masked_strided_factor4(ptr noalias nocapture readonly %p, ptr noali
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[Q]], <vscale x 16 x i64> [[TMP25]]
; PREDICATED_DATA-WITH-EVL-NEXT: call void @llvm.vp.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> [[TMP18]], <vscale x 16 x ptr> align 1 [[TMP26]], <vscale x 16 x i1> [[TMP2]], i32 [[TMP1]])
; PREDICATED_DATA-WITH-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
+; PREDICATED_DATA-WITH-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 16 x i32> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; PREDICATED_DATA-WITH-EVL-NEXT: [[TMP27:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 1024
; PREDICATED_DATA-WITH-EVL-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll
new file mode 100644
index 0000000..c5396f2
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-store-with-gap.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt -mtriple=riscv64 -mattr=+v -passes=loop-vectorize \
+; RUN: -scalable-vectorization=off -enable-masked-interleaved-mem-accesses \
+; RUN: -force-vector-interleave=1 -riscv-v-vector-bits-min=1024 -S < %s | FileCheck %s
+
+define void @store_factor_2_with_tail_gap(i64 %n, ptr %a) {
+; CHECK-LABEL: define void @store_factor_2_with_tail_gap(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <16 x i64> [ <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i64> [[VEC_IND]], <16 x i64> poison, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <32 x i64> [[TMP2]], <32 x i64> poison, <32 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+; CHECK-NEXT: call void @llvm.masked.store.v32i64.p0(<32 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], i32 8, <32 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>)
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP4]]
+; CHECK-NEXT: store i64 [[INDVARS_IV]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %0 = shl nsw i64 %iv, 1
+ %arrayidx = getelementptr inbounds i64, ptr %a, i64 %0
+ store i64 %iv, ptr %arrayidx, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+ ret void
+}
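
The new test covers a factor-2 interleaved store group with a tail gap: the scalar loop writes a[2*i] and never touches a[2*i+1]. With -enable-masked-interleaved-mem-accesses the group becomes one wide store whose mask enables the written lane of each pair and disables the gap, as the alternating <i1 true, i1 false, ...> mask in the checks shows. A minimal sketch of the same shape at VF=4, with placeholder names:

  ; lanes 4..7 of the shuffle index into the poison operand, so the gap lanes are poison
  %wide = shufflevector <4 x i64> %vals, <4 x i64> poison, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  call void @llvm.masked.store.v8i64.p0(<8 x i64> %wide, ptr %base, i32 8, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>)

The mask keeps the poison lanes from ever reaching memory, so the gap elements are left untouched exactly as in the scalar loop.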
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index b82b7f3..01df436 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -32,7 +32,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 9, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[FOR_COND]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP11]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <vscale x 2 x i32> [[BROADCAST_SPLATINSERT5]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
@@ -48,6 +48,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[TMP17:%.*]] = trunc <vscale x 2 x i32> [[TMP16]] to <vscale x 2 x i8>
; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8> [[TMP17]], <vscale x 2 x ptr> align 1 [[BROADCAST_SPLAT4]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP11]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP11]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[BROADCAST_SPLAT8]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]]
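
pr88802.ll picks up the same AVL-as-phi rewrite, here with a constant trip count of 9. The new reductions.ll test below then pins down the plain (non-EVL) reduction lowering under +v. One detail worth noting in its checks: for add, or, xor, and and, the accumulator phi starts from the scalar start value inserted into lane 0 of the operation's identity vector (zeroinitializer for add/or/xor, splat (i32 -1) for and), while smin and umax start from a splat of the start value, since min or max of the start with itself is a no-op. Sketched with the shapes used in the checks:

  ; add: lane 0 holds the start value 2, the remaining lanes hold the identity 0
  %acc.add = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %vector.ph ], [ %next.add, %vector.body ]
  ; smin: every lane holds the start value, so the final reduce still yields min(start, elements)
  %acc.min = phi <vscale x 4 x i32> [ splat (i32 2), %vector.ph ], [ %next.min, %vector.body ]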
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
new file mode 100644
index 0000000..554ce7b
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/reductions.ll
@@ -0,0 +1,1481 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt < %s -p loop-vectorize -mtriple riscv64 -mattr=+v -S | FileCheck %s
+
+; Reductions can be vectorized
+
+; ADD
+
+define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @add(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %add = add nsw i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %add
+}
+
+; OR
+
+define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @or(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = or <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[OR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[OR]] = or i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[OR_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %or, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %or = or i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %or
+}
+
+; AND
+
+define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @and(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> splat (i32 -1), i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = and <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[AND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[AND]] = and i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[AND_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %and, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %and = and i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %and
+}
+
+; XOR
+
+define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @xor(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ insertelement (<vscale x 4 x i32> zeroinitializer, i32 2, i32 0), %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = xor <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[XOR:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[XOR]] = xor i32 [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[XOR_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %xor, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %xor = xor i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %xor
+}
+
+; SMIN
+
+define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @smin(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP11]], [[SUM_010]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %cmp.i = icmp slt i32 %0, %sum.010
+ %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret i32 %.sroa.speculated
+}
+
+; UMAX
+
+define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @umax(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ splat (i32 2), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_010:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP11]], [[SUM_010]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], i32 [[TMP11]], i32 [[SUM_010]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi i32 [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %cmp.i = icmp ugt i32 %0, %sum.010
+ %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret i32 %.sroa.speculated
+}
+
+; FADD (FAST)
+
+define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
+; CHECK-LABEL: define float @fadd_fast(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.nxv4f32(float 0.000000e+00, <vscale x 4 x float> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %add = fadd fast float %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %add
+}
+
+define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfh" {
+; CHECK-LABEL: define half @fadd_fast_half_zvfh(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7]] = fadd fast <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP9:%.*]] = call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> [[TMP7]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP10]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP9]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %add = fadd fast half %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %add
+}
+
+define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfhmin" {
+; CHECK-LABEL: define half @fadd_fast_half_zvfhmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2]] = fadd fast <16 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = fadd fast <16 x half> [[WIDE_LOAD2]], [[VEC_PHI1]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x half> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP6:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast half [[TMP6]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi half [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %add = fadd fast half %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %add
+}
+
+define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfbfmin" {
+; CHECK-LABEL: define bfloat @fadd_fast_bfloat(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR3:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2]] = fadd fast <16 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = fadd fast <16 x bfloat> [[WIDE_LOAD2]], [[VEC_PHI1]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd fast <16 x bfloat> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP6:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ADD]] = fadd fast bfloat [[TMP6]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi bfloat [ [[ADD]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[ADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %add = fadd fast bfloat %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %add
+}
+
+; FMIN (FAST)
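+;
+; The tests below use the conditional-select idiom for a minimum. Given the
+; no-NaNs/no-signed-zeros function attributes, the vectorizer recognizes it as
+; an fmin reduction (a minimal sketch of the scalar pattern, not a check line):
+;   %cmp = fcmp olt float %x, %acc
+;   %acc.next = select i1 %cmp, float %x, float %acc
+; and materializes the final value with @llvm.vector.reduce.fmin.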
+
+define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
+; CHECK-LABEL: define float @fmin_fast(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt float [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %cmp.i = fcmp olt float %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %.sroa.speculated
+}
+
+define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
+; CHECK-LABEL: define half @fmin_fast_half_zvfhmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt half [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %cmp.i = fcmp olt half %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %.sroa.speculated
+}
+
+define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
+; CHECK-LABEL: define bfloat @fmin_fast_bfloat_zvfbfmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp olt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp olt bfloat [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %cmp.i = fcmp olt bfloat %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %.sroa.speculated
+}
+
+; FMAX (FAST)
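+;
+; Same select idiom as the fmin tests, but with `fcmp fast ogt`, so the
+; reduction resolves to @llvm.vector.reduce.fmax (sketch):
+;   %cmp = fcmp fast ogt float %x, %acc
+;   %acc.next = select i1 %cmp, float %x, float %acc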
+
+define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
+; CHECK-LABEL: define float @fmax_fast(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR4]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 4 x float> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call fast float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt float [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], float [[TMP11]], float [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi float [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %cmp.i = fcmp fast ogt float %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %.sroa.speculated
+}
+
+define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
+; CHECK-LABEL: define half @fmax_fast_half_zvfhmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR5]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x half> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt half [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], half [[TMP11]], half [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi half [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %cmp.i = fcmp fast ogt half %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %.sroa.speculated
+}
+
+define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
+; CHECK-LABEL: define bfloat @fmax_fast_bfloat_zvfbfmin(
+; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], i64 [[N:%.*]]) #[[ATTR6]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x bfloat> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x bfloat>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast ogt <vscale x 8 x bfloat> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8]] = select <vscale x 8 x i1> [[TMP7]], <vscale x 8 x bfloat> [[WIDE_LOAD]], <vscale x 8 x bfloat> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[DOTSROA_SPECULATED:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP_I:%.*]] = fcmp fast ogt bfloat [[TMP11]], [[SUM_07]]
+; CHECK-NEXT: [[DOTSROA_SPECULATED]] = select i1 [[CMP_I]], bfloat [[TMP11]], bfloat [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[DOTSROA_SPECULATED_LCSSA:%.*]] = phi bfloat [ [[DOTSROA_SPECULATED]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[DOTSROA_SPECULATED_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %cmp.i = fcmp fast ogt bfloat %0, %sum.07
+ %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %.sroa.speculated
+}
+
+; Reduction cannot be scalably vectorized
+
+; MUL
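+;
+; The scalar idiom is an integer multiply of the accumulator (sketch):
+;   %mul = mul nsw i32 %x, %acc
+; Multiply reductions are not supported with scalable vectors here, so the
+; vectorizer falls back to fixed-width <8 x i32> with an interleave of 2.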
+
+define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @mul(
+; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 16
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ <i32 2, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ splat (i32 1), %[[VECTOR_PH]] ], [ [[TMP3:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP0]], i32 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2]] = mul <8 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3]] = mul <8 x i32> [[WIDE_LOAD2]], [[VEC_PHI1]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = mul <8 x i32> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP5]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MUL:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP6]], [[SUM_07]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP5]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi i32 [ 2, %entry ], [ %mul, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %mul = mul nsw i32 %0, %sum.07
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret i32 %mul
+}
+
+; Note: This test was added to ensure we always check the legality of reductions before checking for memory dependencies
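+; The loop stores a[i + 32] = a[i] + b[i] while also reducing sum *= b[i];
+; the relevant lines from the scalar body are:
+;   %add2 = add nuw nsw i64 %i, 32
+;   %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %add2
+;   store i32 %add, ptr %arrayidx3, align 4
+; The distance-32 forward dependence bounds the VF (8 here); reduction
+; legality is still verified before the memory-dependence analysis runs.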
+define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
+; CHECK-LABEL: define i32 @memory_dependence(
+; CHECK-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias readonly captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ <i32 2, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i32>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <8 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT: store <8 x i32> [[TMP2]], ptr [[TMP4]], align 4
+; CHECK-NEXT: [[TMP5]] = mul <8 x i32> [[WIDE_LOAD1]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP5]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i32 [ [[MUL:%.*]], %[[FOR_BODY]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I]]
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP8]]
+; CHECK-NEXT: [[ADD2:%.*]] = add nuw nsw i64 [[I]], 32
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[ADD2]]
+; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4
+; CHECK-NEXT: [[MUL]] = mul nsw i32 [[TMP9]], [[SUM]]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MUL_LCSSA:%.*]] = phi i32 [ [[MUL]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[MUL_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %i = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+ %sum = phi i32 [ %mul, %for.body ], [ 2, %entry ]
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %i
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %i
+ %1 = load i32, ptr %arrayidx1, align 4
+ %add = add nsw i32 %1, %0
+ %add2 = add nuw nsw i64 %i, 32
+ %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %add2
+ store i32 %add, ptr %arrayidx3, align 4
+ %mul = mul nsw i32 %1, %sum
+ %inc = add nuw nsw i64 %i, 1
+ %exitcond.not = icmp eq i64 %inc, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret i32 %mul
+}
+
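+; FMULADD
+;
+; A reassociatable @llvm.fmuladd that feeds its own accumulator is treated as
+; an fadd reduction of the products (sketch of the scalar pattern):
+;   %muladd = tail call reassoc float @llvm.fmuladd.f32(float %x, float %y, float %acc)
+; The vector body keeps the accumulator in the last operand, and the final
+; sum is produced by @llvm.vector.reduce.fadd in the middle block.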
+define float @fmuladd(ptr %a, ptr %b, i64 %n) {
+; CHECK-LABEL: define float @fmuladd(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> splat (float -0.000000e+00), float 0.000000e+00, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
+; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD1]], <vscale x 4 x float> [[VEC_PHI]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0.000000e+00, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc float @llvm.fmuladd.f32(float [[TMP11]], float [[TMP12]], float [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi float [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret float [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv
+ %1 = load float, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret float %muladd
+}
+
+define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" {
+; CHECK-LABEL: define half @fmuladd_f16_zvfh(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x half> [ insertelement (<vscale x 8 x half> splat (half 0xH8000), half 0xH0000, i32 0), %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x half>, ptr [[TMP7]], align 4
+; CHECK-NEXT: [[TMP8]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD1]], <vscale x 8 x half> [[VEC_PHI]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load half, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP11]], half [[TMP12]], half [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
+ %1 = load half, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %muladd
+}
+
+
+; We can't scalably vectorize reductions of f16 with zvfhmin or bf16 with zvfbfmin, so make sure we use fixed-length vectors instead.
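+; These extensions provide only conversions to/from f32, with no native
+; f16/bf16 arithmetic. For example, the function below carries (from the test):
+;   "target-features"="+zvfhmin"
+; and is vectorized with a fixed <16 x half> body, interleaved by 2, rather
+; than a scalable VF.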
+
+define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvfhmin" {
+; CHECK-LABEL: define half @fmuladd_f16_zvfhmin(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x half> [ <half 0xH0000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000, half 0xH8000>, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x half> [ splat (half 0xH8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds half, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x half>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x half>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds half, ptr [[TMP2]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x half>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x half>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP4]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD]], <16 x half> [[WIDE_LOAD3]], <16 x half> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP5]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]], <16 x half> [[VEC_PHI1]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi half [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0xH0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi half [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds half, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP8:%.*]] = load half, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds half, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP9:%.*]] = load half, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc half @llvm.fmuladd.f16(half [[TMP8]], half [[TMP9]], half [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi half [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret half [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
+ %0 = load half, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
+ %1 = load half, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret half %muladd
+}
+
+define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin" {
+; CHECK-LABEL: define bfloat @fmuladd_bf16(
+; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR3]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 32
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 32
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x bfloat> [ <bfloat 0xR0000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000, bfloat 0xR8000>, %[[VECTOR_PH]] ], [ [[TMP4:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <16 x bfloat> [ splat (bfloat 0xR8000), %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds bfloat, ptr [[TMP0]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x bfloat>, ptr [[TMP0]], align 4
+; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds bfloat, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds bfloat, ptr [[TMP2]], i32 16
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x bfloat>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x bfloat>, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP4]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD]], <16 x bfloat> [[WIDE_LOAD3]], <16 x bfloat> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP5]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]], <16 x bfloat> [[VEC_PHI1]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi bfloat [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0xR0000, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM_07:%.*]] = phi bfloat [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[MULADD:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds bfloat, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[TMP8:%.*]] = load bfloat, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds bfloat, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[TMP9:%.*]] = load bfloat, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[MULADD]] = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat [[TMP8]], bfloat [[TMP9]], bfloat [[SUM_07]])
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[MULADD_LCSSA:%.*]] = phi bfloat [ [[MULADD]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret bfloat [[MULADD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
+ %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
+ %0 = load bfloat, ptr %arrayidx, align 4
+ %arrayidx2 = getelementptr inbounds bfloat, ptr %b, i64 %iv
+ %1 = load bfloat, ptr %arrayidx2, align 4
+ %muladd = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat %0, bfloat %1, bfloat %sum.07)
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+ ret bfloat %muladd
+}
+
+declare float @llvm.fmuladd.f32(float, float, float)
+
+attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
+attributes #1 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfhmin,+zvfhmin"}
+attributes #2 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfbfmin,+zvfbfmin"}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll
deleted file mode 100644
index 695a0c3..0000000
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-reductions.ll
+++ /dev/null
@@ -1,729 +0,0 @@
-; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on \
-; RUN: -riscv-v-vector-bits-max=128 \
-; RUN: -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize \
-; RUN: -pass-remarks-missed=loop-vectorize -mtriple riscv64-linux-gnu \
-; RUN: -force-target-max-vector-interleave=2 -mattr=+v,+f -S 2>%t \
-; RUN: | FileCheck %s -check-prefix=CHECK
-; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARK
-
-; Reduction can be vectorized
-
-; ADD
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @add(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @add
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[ADD1:.*]] = add <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[ADD2:.*]] = add <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[ADD:.*]] = add <vscale x 8 x i32> %[[ADD2]], %[[ADD1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> %[[ADD]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %add = add nsw i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %add
-}
-
-; OR
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @or(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @or
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[OR1:.*]] = or <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[OR2:.*]] = or <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[OR:.*]] = or <vscale x 8 x i32> %[[OR2]], %[[OR1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> %[[OR]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %or, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %or = or i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %or
-}
-
-; AND
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @and(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @and
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[AND1:.*]] = and <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[AND2:.*]] = and <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[AND:.*]] = and <vscale x 8 x i32> %[[AND2]], %[[AND1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.and.nxv8i32(<vscale x 8 x i32> %[[AND]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %and, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %and = and i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %and
-}
-
-; XOR
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define i32 @xor(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @xor
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[XOR1:.*]] = xor <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[XOR2:.*]] = xor <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[XOR:.*]] = xor <vscale x 8 x i32> %[[XOR2]], %[[XOR1]]
-; CHECK-NEXT: call i32 @llvm.vector.reduce.xor.nxv8i32(<vscale x 8 x i32> %[[XOR]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %xor, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %xor = xor i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %xor
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-; SMIN
-
-define i32 @smin(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @smin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[ICMP1:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[ICMP2:.*]] = icmp slt <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
-; CHECK-NEXT: call i32 @llvm.vector.reduce.smin.nxv8i32(<vscale x 8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %cmp.i = icmp slt i32 %0, %sum.010
- %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret i32 %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-; UMAX
-
-define i32 @umax(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @umax
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x i32>
-; CHECK: %[[ICMP1:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[ICMP2:.*]] = icmp ugt <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[ICMP1]], <vscale x 8 x i32> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[ICMP2]], <vscale x 8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> %[[SEL1]], <vscale x 8 x i32> %[[SEL2]])
-; CHECK-NEXT: call i32 @llvm.vector.reduce.umax.nxv8i32(<vscale x 8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.010 = phi i32 [ 2, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %cmp.i = icmp ugt i32 %0, %sum.010
- %.sroa.speculated = select i1 %cmp.i, i32 %0, i32 %sum.010
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret i32 %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-; FADD (FAST)
-
-define float @fadd_fast(ptr noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
-; CHECK: %[[ADD1:.*]] = fadd fast <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[ADD2:.*]] = fadd fast <vscale x 8 x float> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[ADD:.*]] = fadd fast <vscale x 8 x float> %[[ADD2]], %[[ADD1]]
-; CHECK-NEXT: call fast float @llvm.vector.reduce.fadd.nxv8f32(float 0.000000e+00, <vscale x 8 x float> %[[ADD]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %add = fadd fast float %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret float %add
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fadd_fast_half_zvfh(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfh" {
-; CHECK-LABEL: @fadd_fast_half_zvfh
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x half>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x half>
-; CHECK: %[[FADD1:.*]] = fadd fast <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[FADD2:.*]] = fadd fast <vscale x 8 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = fadd fast <vscale x 8 x half> %[[FADD2]], %[[FADD1]]
-; CHECK: call fast half @llvm.vector.reduce.fadd.nxv8f16(half 0xH0000, <vscale x 8 x half> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %add = fadd fast half %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %add
-}
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define half @fadd_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfhmin" {
-; CHECK-LABEL: @fadd_fast_half_zvfhmin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <16 x half>
-; CHECK: %[[LOAD2:.*]] = load <16 x half>
-; CHECK: %[[FADD1:.*]] = fadd fast <16 x half> %[[LOAD1]]
-; CHECK: %[[FADD2:.*]] = fadd fast <16 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = fadd fast <16 x half> %[[FADD2]], %[[FADD1]]
-; CHECK: call fast half @llvm.vector.reduce.fadd.v16f16(half 0xH0000, <16 x half> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %add = fadd fast half %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %add
-}
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define bfloat @fadd_fast_bfloat(ptr noalias nocapture readonly %a, i64 %n) "target-features"="+zvfbfmin" {
-; CHECK-LABEL: @fadd_fast_bfloat
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <16 x bfloat>
-; CHECK: %[[LOAD2:.*]] = load <16 x bfloat>
-; CHECK: %[[FADD1:.*]] = fadd fast <16 x bfloat> %[[LOAD1]]
-; CHECK: %[[FADD2:.*]] = fadd fast <16 x bfloat> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = fadd fast <16 x bfloat> %[[FADD2]], %[[FADD1]]
-; CHECK: call fast bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR0000, <16 x bfloat> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %add = fadd fast bfloat %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret bfloat %add
-}
-
-; FMIN (FAST)
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define float @fmin_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
-; CHECK-LABEL: @fmin_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
-; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x float> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
-; CHECK-NEXT: call float @llvm.vector.reduce.fmin.nxv8f32(<vscale x 8 x float> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %cmp.i = fcmp olt float %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret float %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fmin_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
-; CHECK-LABEL: @fmin_fast_half_zvfhmin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x half>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x half>
-; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x half> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x half> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x half> %[[SEL1]], <vscale x 8 x half> %[[SEL2]]
-; CHECK-NEXT: call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %cmp.i = fcmp olt half %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define bfloat @fmin_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
-; CHECK-LABEL: @fmin_fast_bfloat_zvfbfmin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[FCMP1:.*]] = fcmp olt <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp olt <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp olt <vscale x 8 x bfloat> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x bfloat> %[[SEL1]], <vscale x 8 x bfloat> %[[SEL2]]
-; CHECK-NEXT: call bfloat @llvm.vector.reduce.fmin.nxv8bf16(<vscale x 8 x bfloat> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %cmp.i = fcmp olt bfloat %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret bfloat %.sroa.speculated
-}
-
-; FMAX (FAST)
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define float @fmax_fast(ptr noalias nocapture readonly %a, i64 %n) #0 {
-; CHECK-LABEL: @fmax_fast
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>
-; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x float> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x float> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x float> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x float> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x float> %[[SEL1]], <vscale x 8 x float> %[[SEL2]]
-; CHECK-NEXT: call fast float @llvm.vector.reduce.fmax.nxv8f32(<vscale x 8 x float> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %cmp.i = fcmp fast ogt float %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, float %0, float %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret float %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fmax_fast_half_zvfhmin(ptr noalias nocapture readonly %a, i64 %n) #1 {
-; CHECK-LABEL: @fmax_fast_half_zvfhmin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x half>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x half>
-; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x half> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x half> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x half> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x half> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x half> %[[SEL1]], <vscale x 8 x half> %[[SEL2]]
-; CHECK-NEXT: call fast half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %cmp.i = fcmp fast ogt half %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, half %0, half %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret half %.sroa.speculated
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define bfloat @fmax_fast_bfloat_zvfbfmin(ptr noalias nocapture readonly %a, i64 %n) #2 {
-; CHECK-LABEL: @fmax_fast_bfloat_zvfbfmin
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x bfloat>
-; CHECK: %[[FCMP1:.*]] = fcmp fast ogt <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[FCMP2:.*]] = fcmp fast ogt <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: %[[SEL1:.*]] = select <vscale x 8 x i1> %[[FCMP1]], <vscale x 8 x bfloat> %[[LOAD1]]
-; CHECK: %[[SEL2:.*]] = select <vscale x 8 x i1> %[[FCMP2]], <vscale x 8 x bfloat> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[FCMP:.*]] = fcmp fast ogt <vscale x 8 x bfloat> %[[SEL1]], %[[SEL2]]
-; CHECK-NEXT: %[[SEL:.*]] = select fast <vscale x 8 x i1> %[[FCMP]], <vscale x 8 x bfloat> %[[SEL1]], <vscale x 8 x bfloat> %[[SEL2]]
-; CHECK-NEXT: call fast bfloat @llvm.vector.reduce.fmax.nxv8bf16(<vscale x 8 x bfloat> %[[SEL]])
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %.sroa.speculated, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %cmp.i = fcmp fast ogt bfloat %0, %sum.07
- %.sroa.speculated = select i1 %cmp.i, bfloat %0, bfloat %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret bfloat %.sroa.speculated
-}
-
-; Reduction cannot be vectorized
-
-; MUL
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
-define i32 @mul(ptr nocapture %a, ptr nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @mul
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <8 x i32>
-; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD1]]
-; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD2]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
-; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi i32 [ 2, %entry ], [ %mul, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %mul = mul nsw i32 %0, %sum.07
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end: ; preds = %for.body, %entry
- ret i32 %mul
-}
-
-; Note: This test was added to ensure we always check the legality of reductions (and emit a warning if necessary) before checking for memory dependencies.
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 8, interleaved count: 2)
-define i32 @memory_dependence(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @memory_dependence
-; CHECK: vector.body:
-; CHECK: %[[LOAD1:.*]] = load <8 x i32>
-; CHECK: %[[LOAD2:.*]] = load <8 x i32>
-; CHECK: %[[LOAD3:.*]] = load <8 x i32>
-; CHECK: %[[LOAD4:.*]] = load <8 x i32>
-; CHECK: %[[ADD1:.*]] = add nsw <8 x i32> %[[LOAD3]], %[[LOAD1]]
-; CHECK: %[[ADD2:.*]] = add nsw <8 x i32> %[[LOAD4]], %[[LOAD2]]
-; CHECK: %[[MUL1:.*]] = mul <8 x i32> %[[LOAD3]]
-; CHECK: %[[MUL2:.*]] = mul <8 x i32> %[[LOAD4]]
-; CHECK: middle.block:
-; CHECK: %[[RDX:.*]] = mul <8 x i32> %[[MUL2]], %[[MUL1]]
-; CHECK: call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> %[[RDX]])
-entry:
- br label %for.body
-
-for.body:
- %i = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %sum = phi i32 [ %mul, %for.body ], [ 2, %entry ]
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %i
- %0 = load i32, ptr %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32, ptr %b, i64 %i
- %1 = load i32, ptr %arrayidx1, align 4
- %add = add nsw i32 %1, %0
- %add2 = add nuw nsw i64 %i, 32
- %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %add2
- store i32 %add, ptr %arrayidx3, align 4
- %mul = mul nsw i32 %1, %sum
- %inc = add nuw nsw i64 %i, 1
- %exitcond.not = icmp eq i64 %inc, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
-
-for.end:
- ret i32 %mul
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 4, interleaved count: 2)
-define float @fmuladd(ptr %a, ptr %b, i64 %n) {
-; CHECK-LABEL: @fmuladd(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x float>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x float>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <vscale x 4 x float>
-; CHECK: [[MULADD1:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]], <vscale x 4 x float> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD2]], <vscale x 4 x float> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <vscale x 4 x float> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi float [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %a, i64 %iv
- %0 = load float, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float, ptr %b, i64 %iv
- %1 = load float, ptr %arrayidx2, align 4
- %muladd = tail call reassoc float @llvm.fmuladd.f32(float %0, float %1, float %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret float %muladd
-}
-
-; CHECK-REMARK: vectorized loop (vectorization width: vscale x 8, interleaved count: 2)
-define half @fmuladd_f16_zvfh(ptr %a, ptr %b, i64 %n) "target-features"="+zvfh" {
-; CHECK-LABEL: @fmuladd_f16_zvfh(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <vscale x 8 x half>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x half>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x half>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x half>
-; CHECK: [[MULADD1:%.*]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD]], <vscale x 8 x half> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> [[WIDE_LOAD2]], <vscale x 8 x half> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <vscale x 8 x half> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc half @llvm.vector.reduce.fadd.nxv8f16(half 0xH8000, <vscale x 8 x half> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
- %1 = load half, ptr %arrayidx2, align 4
- %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret half %muladd
-}
-
-
-; We can't scalably vectorize reductions of f16 with zvfhmin or bf16 with zvfbfmin, so make sure we use fixed-length vectors instead.
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define half @fmuladd_f16_zvfhmin(ptr %a, ptr %b, i64 %n) "target-features"="+zvfhmin" {
-; CHECK-LABEL: @fmuladd_f16_zvfhmin(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <16 x half>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <16 x half>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <16 x half>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <16 x half>
-; CHECK: [[MULADD1:%.*]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD]], <16 x half> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <16 x half> @llvm.fmuladd.v16f16(<16 x half> [[WIDE_LOAD2]], <16 x half> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <16 x half> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc half @llvm.vector.reduce.fadd.v16f16(half 0xH8000, <16 x half> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi half [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds half, ptr %a, i64 %iv
- %0 = load half, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds half, ptr %b, i64 %iv
- %1 = load half, ptr %arrayidx2, align 4
- %muladd = tail call reassoc half @llvm.fmuladd.f16(half %0, half %1, half %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret half %muladd
-}
-
-; CHECK-REMARK: Scalable vectorization not supported for the reduction operations found in this loop.
-; CHECK-REMARK: vectorized loop (vectorization width: 16, interleaved count: 2)
-define bfloat @fmuladd_bf16(ptr %a, ptr %b, i64 %n) "target-features"="+zvfbfmin" {
-; CHECK-LABEL: @fmuladd_bf16(
-; CHECK: vector.body:
-; CHECK: [[WIDE_LOAD:%.*]] = load <16 x bfloat>
-; CHECK: [[WIDE_LOAD2:%.*]] = load <16 x bfloat>
-; CHECK: [[WIDE_LOAD3:%.*]] = load <16 x bfloat>
-; CHECK: [[WIDE_LOAD4:%.*]] = load <16 x bfloat>
-; CHECK: [[MULADD1:%.*]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD]], <16 x bfloat> [[WIDE_LOAD3]],
-; CHECK: [[MULADD2:%.*]] = call reassoc <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> [[WIDE_LOAD2]], <16 x bfloat> [[WIDE_LOAD4]],
-; CHECK: middle.block:
-; CHECK: [[BIN_RDX:%.*]] = fadd reassoc <16 x bfloat> [[MULADD2]], [[MULADD1]]
-; CHECK: call reassoc bfloat @llvm.vector.reduce.fadd.v16bf16(bfloat 0xR8000, <16 x bfloat> [[BIN_RDX]])
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %sum.07 = phi bfloat [ 0.000000e+00, %entry ], [ %muladd, %for.body ]
- %arrayidx = getelementptr inbounds bfloat, ptr %a, i64 %iv
- %0 = load bfloat, ptr %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds bfloat, ptr %b, i64 %iv
- %1 = load bfloat, ptr %arrayidx2, align 4
- %muladd = tail call reassoc bfloat @llvm.fmuladd.bf16(bfloat %0, bfloat %1, bfloat %sum.07)
- %iv.next = add nuw nsw i64 %iv, 1
- %exitcond.not = icmp eq i64 %iv.next, %n
- br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
-
-for.end:
- ret bfloat %muladd
-}
-
-declare float @llvm.fmuladd.f32(float, float, float)
-
-attributes #0 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
-attributes #1 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfhmin,+zvfhmin"}
-attributes #2 = { "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "target-features"="+zfbfmin,+zvfbfmin"}
-
-!0 = distinct !{!0, !1, !2, !3, !4}
-!1 = !{!"llvm.loop.vectorize.width", i32 8}
-!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
-!3 = !{!"llvm.loop.interleave.count", i32 2}
-!4 = !{!"llvm.loop.vectorize.enable", i1 true}
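
For reference, the deleted test drove its vectorization shape entirely through the !llvm.loop hints above (!0 through !4): width 8, scalable vectorization enabled, interleave count 2. A minimal sketch, with a hypothetical function name, of how such hints attach to a loop latch; the metadata mirrors the deleted file's exactly:

define void @hint_sketch(ptr %p, i64 %n) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %addr = getelementptr inbounds i32, ptr %p, i64 %iv
  store i32 0, ptr %addr, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %done = icmp eq i64 %iv.next, %n
  ; The hints live on the latch branch via !llvm.loop.
  br i1 %done, label %exit, label %loop, !llvm.loop !0

exit:
  ret void
}

!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.vectorize.width", i32 8}
!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!3 = !{!"llvm.loop.interleave.count", i32 2}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}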
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index 60f3181..ed50796 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -24,7 +24,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
@@ -32,6 +32,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
@@ -88,7 +89,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
@@ -96,6 +97,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x ptr> align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP12]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP12]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
@@ -149,7 +151,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
@@ -159,6 +161,7 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[TMP11]] = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> [[TMP12]], <vscale x 2 x i64> [[VEC_PHI]], i32 [[TMP7]])
; CHECK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
@@ -220,12 +223,13 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: middle.block:
@@ -276,13 +280,14 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: store i64 [[V]], ptr [[B:%.*]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: middle.block:
@@ -395,7 +400,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
@@ -403,6 +408,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP7]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP10]])
; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP10]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
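
The scalable-tailfold.ll hunks above all make the same change: instead of recomputing the remaining element count each iteration as a sub from the trip count, the AVL is now carried as a loop phi and decremented by the number of elements actually processed, which allows the subtraction to carry nuw. A minimal sketch of the resulting EVL loop shape, assuming the tests' trip count of 1025 and hypothetical value names:

declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)
declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr, <vscale x 2 x i1>, i32)
declare void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)

define void @evl_loop_sketch(ptr %a) {
vector.ph:
  br label %vector.body

vector.body:
  %evl.iv = phi i64 [ 0, %vector.ph ], [ %evl.iv.next, %vector.body ]
  %avl = phi i64 [ 1025, %vector.ph ], [ %avl.next, %vector.body ]
  ; EVL = how many elements this iteration handles; never exceeds %avl.
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 2, i1 true)
  %addr = getelementptr inbounds i64, ptr %a, i64 %evl.iv
  %v = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 %addr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %v, ptr align 8 %addr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  %evl.ext = zext i32 %evl to i64
  %evl.iv.next = add nuw i64 %evl.ext, %evl.iv
  ; The decrement cannot wrap because %evl.ext <= %avl, hence the new 'sub nuw'.
  %avl.next = sub nuw i64 %avl, %evl.ext
  %done = icmp eq i64 %evl.iv.next, 1025
  br i1 %done, label %exit, label %vector.body

exit:
  ret void
}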
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
index 745b8ba..5c6febc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/select-cmp-reduction.ll
@@ -1,60 +1,57 @@
-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -S \
-; RUN: < %s | FileCheck %s
-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 \
-; RUN: -scalable-vectorization=on -S < %s | FileCheck %s -check-prefix=SCALABLE
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt -p loop-vectorize -mtriple riscv64 -mattr=+v -S < %s | FileCheck %s
-target triple = "riscv64"
-
-define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) #0 {
-; CHECK-LABEL: @select_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp sge <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X:%.*]], i64 0
-; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp sge <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_icmp(i32 %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
+; CHECK-LABEL: define i32 @select_icmp(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[X]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp sge <vscale x 4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP12]], [[X]]
+; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[COND_LCSSA]]
;
entry:
br label %for.body
@@ -74,56 +71,57 @@ for.end:
ret i32 %cond
}
-define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) #0 {
-; CHECK-LABEL: @select_fcmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[X:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast uge <4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_fcmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[X:%.*]], i64 0
-; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[C:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = fcmp fast uge <vscale x 4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %y, i32 0
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_fcmp(float %x, i32 %y, ptr nocapture readonly %c, i64 %n) {
+; CHECK-LABEL: define i32 @select_fcmp(
+; CHECK-SAME: float [[X:%.*]], i32 [[Y:%.*]], ptr readonly captures(none) [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[X]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast uge <vscale x 4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[Y]], i32 0
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[A:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[COND:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast olt float [[TMP12]], [[X]]
+; CHECK-NEXT: [[COND]] = select i1 [[CMP1]], i32 [[A]], i32 [[Y]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[FOR_END]]:
+; CHECK-NEXT: [[COND_LCSSA:%.*]] = phi i32 [ [[COND]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[COND_LCSSA]]
;
entry:
br label %for.body
@@ -143,52 +141,55 @@ for.end:
ret i32 %cond
}
-define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) #0 {
-; CHECK-LABEL: @select_const_i32_from_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 7, i32 3
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_const_i32_from_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 7, i32 3
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_const_i32_from_icmp(ptr nocapture readonly %v, i64 %n) {
+; CHECK-LABEL: define i32 @select_const_i32_from_icmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 7, i32 3
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 3, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
+; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 7
+; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -208,52 +209,55 @@ exit: ; preds = %for.body
ret i32 %5
}
-define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64 %n) #0 {
-; CHECK-LABEL: @select_i32_from_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ne <4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %b, i32 %a
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_i32_from_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[V:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 %b, i32 %a
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_i32_from_icmp(ptr nocapture readonly %v, i32 %a, i32 %b, i64 %n) {
+; CHECK-LABEL: define i32 @select_i32_from_icmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i32 [[A:%.*]], i32 [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 [[B]], i32 [[A]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ [[A]], %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[TMP15]], 3
+; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 [[B]]
+; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -273,52 +277,55 @@ exit: ; preds = %for.body
ret i32 %5
}
-define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) #0 {
-; CHECK-LABEL: @select_const_i32_from_fcmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[V:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast one <4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
-; CHECK-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP7]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 2
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @select_const_i32_from_fcmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP9:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[V:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = fcmp fast one <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
-; SCALABLE-NEXT: [[TMP9]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP8]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP13:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP9]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP13]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 2
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+define i32 @select_const_i32_from_fcmp(ptr nocapture readonly %v, i64 %n) {
+; CHECK-LABEL: define i32 @select_const_i32_from_fcmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = fcmp fast one <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
+; CHECK-NEXT: [[TMP8]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP7]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP8]])
+; CHECK-NEXT: [[TMP11:%.*]] = freeze i1 [[TMP10]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP11]], i32 1, i32 2
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 2, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP12:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[TMP18:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[TMP17:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[V]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = fcmp fast ueq float [[TMP15]], 3.000000e+00
+; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], i32 [[TMP13]], i32 1
+; CHECK-NEXT: [[TMP18]] = add nuw nsw i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[TMP18]], [[N]]
+; CHECK-NEXT: br i1 [[TMP19]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], %[[FOR_BODY]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -338,12 +345,24 @@ exit: ; preds = %for.body
ret i32 %5
}
-define float @select_const_f32_from_icmp(ptr nocapture readonly %v, i64 %n) #0 {
-; CHECK-LABEL: @select_const_f32_from_icmp
-; CHECK-NOT: vector.body
-;
-; SCALABLE-LABEL: @select_const_f32_from_icmp
-; SCALABLE-NOT: vector.body
+define float @select_const_f32_from_icmp(ptr nocapture readonly %v, i64 %n) {
+; CHECK-LABEL: define float @select_const_f32_from_icmp(
+; CHECK-SAME: ptr readonly captures(none) [[V:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[TMP6:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi fast float [ 3.000000e+00, %[[ENTRY]] ], [ [[TMP5:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[V]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 3
+; CHECK-NEXT: [[TMP5]] = select fast i1 [[TMP4]], float [[TMP1]], float 7.000000e+00
+; CHECK-NEXT: [[TMP6]] = add nuw nsw i64 [[TMP0]], 1
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[TMP6]], [[N]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi float [ [[TMP5]], %[[FOR_BODY]] ]
+; CHECK-NEXT: ret float [[DOTLCSSA]]
;
entry:
br label %for.body
@@ -363,60 +382,67 @@ exit: ; preds = %for.body
ret float %5
}
-define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i64 %n) #0 {
-; CHECK-LABEL: @pred_select_const_i32_from_icmp
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[SRC1:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], splat (i32 35)
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[SRC2:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP5]], i32 4, <4 x i1> [[TMP4]], <4 x i32> poison)
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq <4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
-; CHECK-NEXT: [[TMP9:%.*]] = or <4 x i1> [[VEC_PHI]], [[TMP8]]
-; CHECK-NEXT: [[PREDPHI]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP9]], <4 x i1> [[VEC_PHI]]
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+define i32 @pred_select_const_i32_from_icmp(ptr noalias nocapture readonly %src1, ptr noalias nocapture readonly %src2, i64 %n) {
+; CHECK-LABEL: define i32 @pred_select_const_i32_from_icmp(
+; CHECK-SAME: ptr noalias readonly captures(none) [[SRC1:%.*]], ptr noalias readonly captures(none) [[SRC2:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 35)
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC2]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
+; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP9]]
+; CHECK-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i1> [[TMP10]], <vscale x 4 x i1> [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[PREDPHI]])
-; CHECK-NEXT: [[FR:%.*]] = freeze i1 [[TMP12]]
-; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 0
-; CHECK-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
-;
-; SCALABLE-LABEL: @pred_select_const_i32_from_icmp
-; SCALABLE: vector.ph:
-; SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 %n, [[TMP3]]
-; SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 %n, [[N_MOD_VF]]
-; SCALABLE-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
-; SCALABLE-NEXT: [[TMP16:%.*]] = mul nuw i64 [[TMP15]], 4
-; SCALABLE-NEXT: br label [[VECTOR_BODY:%.*]]
-; SCALABLE: vector.body:
-; SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[SRC1:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
-; SCALABLE-NEXT: [[TMP8:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 35)
-; SCALABLE-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[SRC2:%.*]], i64 [[INDEX]]
-; SCALABLE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP9]], i32 4, <vscale x 4 x i1> [[TMP8]], <vscale x 4 x i32> poison)
-; SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq <vscale x 4 x i32> [[WIDE_MASKED_LOAD]], splat (i32 2)
-; SCALABLE-NEXT: [[TMP13:%.*]] = or <vscale x 4 x i1> [[VEC_PHI]], [[TMP12]]
-; SCALABLE-NEXT: [[PREDPHI]] = select <vscale x 4 x i1> [[TMP8]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> [[VEC_PHI]]
-; SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
-; SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; SCALABLE-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; SCALABLE: middle.block:
-; SCALABLE-NEXT: [[TMP18:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
-; SCALABLE-NEXT: [[FR:%.*]] = freeze i1 [[TMP18]]
-; SCALABLE-NEXT: [[RDX_SELECT:%.*]] = select i1 [[FR]], i32 1, i32 0
-; SCALABLE-NEXT: %cmp.n = icmp eq i64 %n, %n.vec
+; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP12:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[PREDPHI]])
+; CHECK-NEXT: [[TMP13:%.*]] = freeze i1 [[TMP12]]
+; CHECK-NEXT: [[RDX_SELECT:%.*]] = select i1 [[TMP13]], i32 1, i32 0
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_END_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[I_013:%.*]] = phi i64 [ [[INC:%.*]], %[[FOR_INC:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_012:%.*]] = phi i32 [ [[R_1:%.*]], %[[FOR_INC]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC1]], i64 [[I_013]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP14]], 35
+; CHECK-NEXT: br i1 [[CMP1]], label %[[IF_THEN:.*]], label %[[FOR_INC]]
+; CHECK: [[IF_THEN]]:
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[SRC2]], i64 [[I_013]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP15]], 2
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[CMP3]], i32 1, i32 [[R_012]]
+; CHECK-NEXT: br label %[[FOR_INC]]
+; CHECK: [[FOR_INC]]:
+; CHECK-NEXT: [[R_1]] = phi i32 [ [[R_012]], %[[FOR_BODY]] ], [ [[SPEC_SELECT]], %[[IF_THEN]] ]
+; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_013]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: [[FOR_END_LOOPEXIT]]:
+; CHECK-NEXT: [[R_1_LCSSA:%.*]] = phi i32 [ [[R_1]], %[[FOR_INC]] ], [ [[RDX_SELECT]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[R_1_LCSSA]]
;
entry:
br label %for.body
@@ -446,5 +472,3 @@ for.end.loopexit: ; preds = %for.inc
%r.1.lcssa = phi i32 [ %r.1, %for.inc ]
ret i32 %r.1.lcssa
}
-
-attributes #0 = { "target-features"="+f,+v" }
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
index 9377854..38e7832 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
@@ -33,7 +33,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -42,6 +42,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -158,7 +159,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -167,6 +168,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -283,7 +285,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -292,6 +294,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -408,7 +411,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -417,6 +420,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -533,7 +537,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -542,6 +546,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -658,7 +663,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -667,6 +672,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -783,7 +789,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -792,6 +798,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -908,7 +915,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -917,6 +924,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1033,7 +1041,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1042,6 +1050,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1158,7 +1167,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1167,6 +1176,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1283,7 +1293,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1292,6 +1302,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1408,7 +1419,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1417,6 +1428,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1533,7 +1545,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 16, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr align 1 [[TMP13]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
@@ -1542,6 +1554,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_OP]], ptr align 1 [[TMP16]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP18]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1661,7 +1674,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1670,6 +1683,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1788,7 +1802,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1797,6 +1811,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1915,7 +1930,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -1924,6 +1939,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -2042,7 +2058,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -2051,6 +2067,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -2222,7 +2239,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 100, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -2231,6 +2248,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP19]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 100
; IF-EVL-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
index f94f62d..f604745 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
@@ -38,7 +38,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -49,6 +49,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -186,7 +187,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -197,6 +198,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -334,7 +336,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -345,6 +347,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -482,7 +485,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -493,6 +496,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP29]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -626,7 +630,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -635,6 +639,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP24]], ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -754,7 +759,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP13:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
@@ -763,6 +768,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP17]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP13]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP13]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -882,7 +888,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -893,6 +899,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP15]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1020,7 +1027,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1031,6 +1038,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP15]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1158,7 +1166,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1167,6 +1175,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP24]], ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index 2253724..ce2b790 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -33,7 +33,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]]
@@ -42,6 +42,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -160,7 +161,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META10:![0-9]+]]
@@ -169,6 +170,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META13:![0-9]+]], !noalias [[META10]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -287,7 +289,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META17:![0-9]+]]
@@ -296,6 +298,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i32.p0(<vscale x 2 x i32> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META20:![0-9]+]], !noalias [[META17]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -414,7 +417,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0(ptr align 4 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META24:![0-9]+]]
@@ -423,6 +426,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f64.p0(<vscale x 2 x double> [[TMP16]], ptr align 8 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META27:![0-9]+]], !noalias [[META24]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -541,7 +545,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0(ptr align 8 [[TMP14]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META31:![0-9]+]]
@@ -550,6 +554,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> [[TMP16]], ptr align 4 [[TMP17]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META34:![0-9]+]], !noalias [[META31]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -668,7 +673,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -677,6 +682,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -795,7 +801,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -804,6 +810,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4f32.p0(<vscale x 4 x float> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -922,7 +929,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -931,6 +938,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1049,7 +1057,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
@@ -1058,6 +1066,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP18]], ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1176,7 +1185,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
@@ -1185,6 +1194,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[TMP18]], ptr align 8 [[TMP19]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP14]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -1298,7 +1308,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -1310,6 +1320,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index 48e080c..d02d53b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -36,7 +36,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV1]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
@@ -46,6 +46,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP20]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP11]])
; IF-EVL-OUTLOOP-NEXT: [[TMP22:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP22]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP22]]
; IF-EVL-OUTLOOP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -87,7 +88,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -97,6 +98,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]]
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -193,7 +195,7 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -257,7 +259,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP10:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP10]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP11]], i64 0
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -272,6 +274,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[PREDPHI]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[PREDPHI1]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP11]])
; IF-EVL-OUTLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP10]], [[TMP23]]
; IF-EVL-OUTLOOP-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -317,7 +320,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -326,6 +329,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP23]]
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -430,7 +434,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 3)
; NO-VP-INLOOP-NEXT: [[TMP10:%.*]] = select <vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP10]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -508,7 +512,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = mul i32 1, [[TMP12]]
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP13]], i64 0
@@ -521,6 +525,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP19]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP18]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP12]])
; IF-EVL-OUTLOOP-NEXT: [[TMP20:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP20]], [[EVL_BASED_IV1]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
@@ -568,7 +573,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT1:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[ADD:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV1]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
; IF-EVL-INLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP12]], i64 0
@@ -581,6 +586,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[ADD]] = add i32 [[TMP17]], [[RDX]]
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT1]] = add i64 [[TMP19]], [[EVL_BASED_IV1]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT1]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
@@ -697,7 +703,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP15:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_IND]]
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
-; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[TMP17]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -768,7 +774,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND2:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION1]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT7:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP14]], i64 0
; IF-EVL-OUTLOOP-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -786,6 +792,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[TMP24]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[PREDPHI]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP14]])
; IF-EVL-OUTLOOP-NEXT: [[TMP25:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP25]], [[IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP25]]
; IF-EVL-OUTLOOP-NEXT: [[VEC_IND_NEXT7]] = add <vscale x 4 x i32> [[VEC_IND2]], [[BROADCAST_SPLAT2]]
; IF-EVL-OUTLOOP-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
@@ -837,7 +844,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP12:%.*]] = mul i32 1, [[TMP11]]
; IF-EVL-INLOOP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP12]], i64 0
@@ -849,6 +856,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[TMP17]] = add i32 [[TMP16]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP18:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK1:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
@@ -973,7 +981,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; NO-VP-INLOOP-NEXT: [[TMP15:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD]], [[VEC_IND]]
; NO-VP-INLOOP-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> zeroinitializer
; NO-VP-INLOOP-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP16]])
-; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[TMP17]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP18]] = add i32 [[VEC_PHI]], [[TMP17]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-INLOOP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i32> [[VEC_IND]], [[DOTSPLAT]]
; NO-VP-INLOOP-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index 9299866..ae047f5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -24,7 +24,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -36,6 +36,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -144,7 +145,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -156,6 +157,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -263,7 +265,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -275,6 +277,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -382,7 +385,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -394,6 +397,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index f8f3532..987f946 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -32,8 +32,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL: [[VECTOR_BODY]]:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP25]], %[[VECTOR_PH]] ], [ [[TMP12:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP12]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -43,6 +43,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP19]]
; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -168,8 +169,8 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR2:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT1]], %[[VECTOR_PH]] ], [ [[TMP19:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP32]], %[[VECTOR_PH]] ], [ [[TMP15:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP15]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP15]])
@@ -180,6 +181,7 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP21]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP15]])
; IF-EVL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP15]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP23]]
; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -325,8 +327,8 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[VP_OP_LOAD:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR2:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT1]], %[[VECTOR_PH]] ], [ [[TMP22:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR4:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT3]], %[[VECTOR_PH]] ], [ [[TMP23:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP39]], %[[VECTOR_PH]] ], [ [[TMP18:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP18]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP20]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]])
@@ -339,6 +341,7 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP5]], ptr align 4 [[TMP25]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP18]])
; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP18]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP27]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
; IF-EVL-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: [[MIDDLE_BLOCK]]:
@@ -639,8 +642,8 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 2 x i64> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[TMP20:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[PREV_EVL:%.*]] = phi i32 [ [[TMP5]], %[[VECTOR_PH]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP11]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP7:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP7]]
@@ -652,6 +655,7 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP9]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP21]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; IF-EVL-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 0eab77d..2aeb1d0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -27,7 +27,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP12]]
@@ -41,6 +41,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> align 4 [[TMP16]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP17]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index c719912..3e23df7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -26,16 +26,17 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.add.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.add.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = add i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -75,7 +76,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = add i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = add i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -129,20 +130,20 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 1, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4
-; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP4]])
-; IF-EVL-NEXT: [[MUL]] = mul i32 [[TMP5]], [[RDX]]
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]])
-; IF-EVL-NEXT: [[TMP6]] = mul i32 [[TMP8]], [[VEC_PHI1]]
+; IF-EVL-NEXT: [[MUL]] = mul i32 [[RDX]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD3]])
+; IF-EVL-NEXT: [[TMP5]] = mul i32 [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP6]], [[MUL]]
+; IF-EVL-NEXT: [[BIN_RDX:%.*]] = mul i32 [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; IF-EVL: scalar.ph:
@@ -179,9 +180,9 @@ define i32 @mul(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4
; NO-VP-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP7]] = mul i32 [[TMP6]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP7]] = mul i32 [[VEC_PHI]], [[TMP6]]
; NO-VP-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD2]])
-; NO-VP-NEXT: [[TMP9]] = mul i32 [[TMP8]], [[VEC_PHI1]]
+; NO-VP-NEXT: [[TMP9]] = mul i32 [[VEC_PHI1]], [[TMP8]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -240,16 +241,17 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.or.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.or.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = or i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -289,7 +291,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = or i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = or i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -347,16 +349,17 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.and.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.and.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = and i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -396,7 +399,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = and i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = and i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
@@ -454,16 +457,17 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.xor.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.xor.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = xor i32 [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -503,7 +507,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = xor i32 [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = xor i32 [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
@@ -556,21 +560,22 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smin.nxv4i32(i32 2147483647, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smin.nxv4i32(i32 2147483647, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smin.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -666,21 +671,22 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smax.nxv4i32(i32 -2147483648, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.smax.nxv4i32(i32 -2147483648, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.smax.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -776,21 +782,22 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umin.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umin.nxv4i32(i32 -1, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umin.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -886,21 +893,22 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umax.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vp.reduce.umax.nxv4i32(i32 0, <vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX]] = call i32 @llvm.umax.i32(i32 [[TMP14]], i32 [[VEC_PHI]])
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1001,16 +1009,17 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP15]] = fadd reassoc float [[TMP14]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1050,7 +1059,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]]
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP7]], align 4
; NO-VP-NEXT: [[TMP9:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[TMP9]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP10]] = fadd reassoc float [[VEC_PHI]], [[TMP9]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -1104,20 +1113,20 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START:%.*]], [[ENTRY]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ], [ [[TMP5:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 4
-; IF-EVL-NEXT: [[TMP4:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP5:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP4]])
-; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[TMP5]], [[RDX]]
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]])
-; IF-EVL-NEXT: [[TMP6]] = fmul reassoc float [[TMP8]], [[VEC_PHI1]]
+; IF-EVL-NEXT: [[MUL]] = fmul reassoc float [[RDX]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD3]])
+; IF-EVL-NEXT: [[TMP5]] = fmul reassoc float [[VEC_PHI1]], [[TMP4]]
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 8
; IF-EVL-NEXT: [[TMP7:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP6]], [[MUL]]
+; IF-EVL-NEXT: [[BIN_RDX:%.*]] = fmul reassoc float [[TMP5]], [[MUL]]
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; IF-EVL: scalar.ph:
@@ -1154,9 +1163,9 @@ define float @fmul(ptr %a, i64 %n, float %start) {
; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
; NO-VP-NEXT: [[TMP6:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]])
-; NO-VP-NEXT: [[TMP7]] = fmul reassoc float [[TMP6]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP7]] = fmul reassoc float [[VEC_PHI]], [[TMP6]]
; NO-VP-NEXT: [[TMP8:%.*]] = call reassoc float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD2]])
-; NO-VP-NEXT: [[TMP9]] = fmul reassoc float [[TMP8]], [[VEC_PHI1]]
+; NO-VP-NEXT: [[TMP9]] = fmul reassoc float [[VEC_PHI1]], [[TMP8]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
@@ -1210,22 +1219,23 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmin.nxv4f32(float 0x47EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmin.nxv4f32(float 0x47EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt float [[TMP14]], [[VEC_PHI]]
; IF-EVL-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP14]], float [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1322,22 +1332,23 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
; IF-EVL-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; IF-EVL-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP7]], 4
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[RDX_MINMAX_SELECT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmax.nxv4f32(float 0xC7EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP14:%.*]] = call fast float @llvm.vp.reduce.fmax.nxv4f32(float 0xC7EFFFFFE0000000, <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[RDX_MINMAX_CMP:%.*]] = fcmp fast ogt float [[TMP14]], [[VEC_PHI]]
; IF-EVL-NEXT: [[RDX_MINMAX_SELECT]] = select fast i1 [[RDX_MINMAX_CMP]], float [[TMP14]], float [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; IF-EVL-NEXT: [[TMP8:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP8]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1435,19 +1446,19 @@ define float @fminimum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8
-; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]])
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
+; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
+; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.minimum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> [[TMP5]])
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -1543,19 +1554,19 @@ define float @fmaximum(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI1:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP3:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[VEC_PHI2:%.*]] = phi <8 x float> [ [[BROADCAST_SPLAT]], [[ENTRY]] ], [ [[TMP4:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 8
-; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
-; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
-; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI]], <8 x float> [[WIDE_MASKED_LOAD]])
+; IF-EVL-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
+; IF-EVL-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
; IF-EVL-NEXT: [[TMP3]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI1]], <8 x float> [[WIDE_LOAD2]])
+; IF-EVL-NEXT: [[TMP4]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[VEC_PHI2]], <8 x float> [[WIDE_LOAD3]])
; IF-EVL-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 16
; IF-EVL-NEXT: [[TMP6:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]]
; IF-EVL: middle.block:
-; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP4]], <8 x float> [[TMP3]])
+; IF-EVL-NEXT: [[TMP5:%.*]] = call <8 x float> @llvm.maximum.v8f32(<8 x float> [[TMP3]], <8 x float> [[TMP4]])
; IF-EVL-NEXT: [[TMP7:%.*]] = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> [[TMP5]])
; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_RND_UP]], [[N_VEC]]
; IF-EVL-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
@@ -1655,19 +1666,20 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ [[START:%.*]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
-; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP10]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[VP_OP_LOAD1:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP13]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = fmul reassoc <vscale x 4 x float> [[VP_OP_LOAD]], [[VP_OP_LOAD1]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
+; IF-EVL-NEXT: [[TMP17:%.*]] = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP18]] = fadd reassoc float [[TMP17]], [[VEC_PHI]]
-; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP19]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP11]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: br label [[FOR_END:%.*]]
; IF-EVL: scalar.ph:
@@ -1712,7 +1724,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP9]], align 4
; NO-VP-NEXT: [[TMP11:%.*]] = fmul reassoc <vscale x 4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; NO-VP-NEXT: [[TMP12:%.*]] = call reassoc float @llvm.vector.reduce.fadd.nxv4f32(float -0.000000e+00, <vscale x 4 x float> [[TMP11]])
-; NO-VP-NEXT: [[TMP13]] = fadd reassoc float [[TMP12]], [[VEC_PHI]]
+; NO-VP-NEXT: [[TMP13]] = fadd reassoc float [[VEC_PHI]], [[TMP12]]
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
@@ -1774,16 +1786,17 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = icmp slt <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
-; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = icmp slt <vscale x 4 x i32> [[VP_OP_LOAD]], splat (i32 3)
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
@@ -1890,16 +1903,17 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP14:%.*]] = fcmp fast olt <vscale x 4 x float> [[VP_OP_LOAD]], splat (float 3.000000e+00)
-; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP10]])
-; IF-EVL-NEXT: [[TMP17:%.*]] = zext i32 [[TMP10]] to i64
-; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP17]], [[EVL_BASED_IV]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP13:%.*]] = fcmp fast olt <vscale x 4 x float> [[VP_OP_LOAD]], splat (float 3.000000e+00)
+; IF-EVL-NEXT: [[TMP16]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP11]])
+; IF-EVL-NEXT: [[TMP9:%.*]] = zext i32 [[TMP11]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; IF-EVL-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: middle.block:
; IF-EVL-NEXT: [[TMP19:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1(<vscale x 4 x i1> [[TMP16]])
; IF-EVL-NEXT: [[TMP20:%.*]] = freeze i1 [[TMP19]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 2678989..1f7c518 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -28,7 +28,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP13]]
@@ -43,6 +43,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP26]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP11]])
; IF-EVL-NEXT: [[TMP14:%.*]] = zext i32 [[TMP11]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP14]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; IF-EVL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
index 03bedde..be6ae1d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
@@ -44,7 +44,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP: vector.body:
; IF-EVL-OUTLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP10]], [[ENTRY]] ], [ [[TMP19:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-OUTLOOP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-OUTLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]]), !alias.scope [[META0:![0-9]+]]
@@ -52,6 +52,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-OUTLOOP-NEXT: [[TMP19]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP12]])
; IF-EVL-OUTLOOP-NEXT: [[TMP21:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-OUTLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-OUTLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP21]]
; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-OUTLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-OUTLOOP: middle.block:
@@ -100,7 +101,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP: vector.body:
; IF-EVL-INLOOP-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[VEC_PHI:%.*]] = phi i32 [ [[START]], [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[TMP14:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP13]], i32 4, i1 true)
; IF-EVL-INLOOP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[EVL_BASED_IV]]
; IF-EVL-INLOOP-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP19]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP14]]), !alias.scope [[META0:![0-9]+]]
@@ -108,6 +109,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; IF-EVL-INLOOP-NEXT: [[TMP22]] = add i32 [[TMP21]], [[VEC_PHI]]
; IF-EVL-INLOOP-NEXT: [[TMP23:%.*]] = zext i32 [[TMP14]] to i64
; IF-EVL-INLOOP-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]]
+; IF-EVL-INLOOP-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP13]], [[TMP23]]
; IF-EVL-INLOOP-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-INLOOP-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; IF-EVL-INLOOP: middle.block:
@@ -214,7 +216,7 @@ define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr)
; NO-VP-INLOOP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; NO-VP-INLOOP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4, !alias.scope [[META0:![0-9]+]]
; NO-VP-INLOOP-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
-; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[TMP11]], [[VEC_PHI]]
+; NO-VP-INLOOP-NEXT: [[TMP12]] = add i32 [[VEC_PHI]], [[TMP11]]
; NO-VP-INLOOP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; NO-VP-INLOOP-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NO-VP-INLOOP-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index 7082c12d..d474a03 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -23,13 +23,14 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ [[N]], [[ENTRY]] ], [ [[AVL_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[EVL_BASED_IV]]
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP_LOAD]], ptr align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i32 [[TMP12]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i32 [[TMP11]], [[TMP12]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index 3b3b798..06c6bfe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -27,7 +27,7 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[UMAX]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[UMAX]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
@@ -35,6 +35,7 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[UMAX]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -95,7 +96,7 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TC]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TC]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
@@ -103,6 +104,7 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; CHECK-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP13]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP13]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -163,7 +165,7 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TC_ADD]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TC_ADD]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
@@ -171,6 +173,7 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP5]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TC_ADD]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index b6f2a50..5f407fc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -23,7 +23,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -34,6 +34,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP19]], <vscale x 4 x i1> [[TMP17]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP9]], [[TMP21]]
; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 1a6e60d8..59d1370 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -26,13 +26,14 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi float [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP9:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP9:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP9]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP14]] = call float @llvm.vp.reduce.fadd.nxv4f32(float [[VEC_PHI]], <vscale x 4 x float> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP9]], [[TMP15]]
; IF-EVL-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index ba2ee84f..2d5718b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -25,7 +25,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -33,6 +33,7 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
@@ -242,7 +243,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -250,6 +251,7 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
@@ -352,7 +354,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -360,6 +362,7 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
@@ -462,7 +465,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -470,6 +473,7 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[VP_OP]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
@@ -573,7 +577,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -582,6 +586,7 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; IF-EVL: middle.block:
@@ -690,7 +695,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -699,6 +704,7 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; IF-EVL: middle.block:
@@ -807,7 +813,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -816,6 +822,7 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; IF-EVL: middle.block:
@@ -924,7 +931,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -933,6 +940,7 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> [[TMP14]], <vscale x 4 x i32> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; IF-EVL: middle.block:
@@ -1040,7 +1048,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1048,6 +1056,7 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP14]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[VP_OP]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP15:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP15]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; IF-EVL: middle.block:
@@ -1258,7 +1267,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1267,6 +1276,7 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; IF-EVL: middle.block:
@@ -1375,7 +1385,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[BROADCAST_SPLAT]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1384,6 +1394,7 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP14]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
; IF-EVL: middle.block:
@@ -1707,7 +1718,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x float> [ [[TMP9]], [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP10]])
@@ -1717,6 +1728,7 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[TMP17]] = call <vscale x 4 x float> @llvm.vp.merge.nxv4f32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x float> [[TMP16]], <vscale x 4 x float> [[VEC_PHI]], i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP18]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP18]]
; IF-EVL-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]]
; IF-EVL: middle.block:
@@ -1826,7 +1838,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1834,6 +1846,7 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
; IF-EVL: middle.block:
@@ -1942,7 +1955,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i1> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0(ptr align 4 [[TMP11]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP9]])
@@ -1950,6 +1963,7 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[TMP15]] = call <vscale x 4 x i1> @llvm.vp.merge.nxv4i1(<vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i1> [[VEC_PHI]], i32 [[TMP9]])
; IF-EVL-NEXT: [[TMP16:%.*]] = zext i32 [[TMP9]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP16]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP16]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index 297a410..e2db28d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -23,7 +23,7 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], -1
@@ -47,6 +47,7 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP20]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
@@ -164,7 +165,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1024, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL:%.*]], [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[OFFSET_IDX3:%.*]] = trunc i64 [[EVL_BASED_IV]] to i32
@@ -194,6 +195,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE5]], ptr align 4 [[TMP25]], <vscale x 4 x i1> [[VP_REVERSE_MASK6]], i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP28:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP28]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP28]]
; IF-EVL-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1024
; IF-EVL-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; IF-EVL: middle.block:
@@ -343,7 +345,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 1025, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 1025, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 16, i1 true)
; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
@@ -377,6 +379,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> [[VP_REVERSE2]], ptr align 1 [[TMP26]], <vscale x 16 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP27:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP27]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP27]]
; IF-EVL-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; IF-EVL-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; IF-EVL: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index 47f1cfb..1c78b25 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -26,7 +26,7 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP5:%.*]] = sub i64 200, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP5]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
@@ -35,6 +35,7 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 8 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP5]], [[TMP13]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
@@ -356,7 +357,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP5:%.*]] = sub i64 200, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP5:%.*]] = phi i64 [ 200, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP5]], i32 2, i1 true)
; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr align 32 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
@@ -365,6 +366,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP11]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; IF-EVL-NEXT: [[TMP13:%.*]] = zext i32 [[TMP6]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP5]], [[TMP13]]
; IF-EVL-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 200
; IF-EVL-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; IF-EVL: middle.block:
@@ -461,7 +463,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[AVL:%.*]] = sub i64 3002, [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL:%.*]] = phi i64 [ 3002, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP9:%.*]] = icmp ult i64 [[AVL]], 1024
; IF-EVL-NEXT: [[SAFE_AVL:%.*]] = select i1 [[TMP9]], i64 [[AVL]], i64 1024
; IF-EVL-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[SAFE_AVL]], i32 1, i1 true)
@@ -472,6 +474,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv1i64.p0(<vscale x 1 x i64> [[VP_OP_LOAD]], ptr align 32 [[TMP5]], <vscale x 1 x i1> splat (i1 true), i32 [[TMP10]])
; IF-EVL-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; IF-EVL-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 3002
; IF-EVL-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; IF-EVL: middle.block:
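
The `@no_high_lmul_or_interleave` hunk just above shows the same rewrite interacting with a clamp: the phi'd AVL is capped before feeding `get.vector.length`, while the decrement still subtracts the full EVL actually consumed. A condensed, illustrative excerpt of that loop body shape (value names abbreviated from the CHECK lines, not literal test output):

```llvm
  %avl      = phi i64 [ 3002, %vector.ph ], [ %avl.next, %vector.body ]
  %small    = icmp ult i64 %avl, 1024
  %safe.avl = select i1 %small, i64 %avl, i64 1024   ; cap AVL at the safe distance
  %evl      = call i32 @llvm.experimental.get.vector.length.i64(i64 %safe.avl, i32 1, i1 true)
  ; ... vp.load/vp.store predicated on %evl ...
  %evl.zext = zext i32 %evl to i64
  %avl.next = sub nuw i64 %avl, %evl.zext            ; decrement by what was consumed
```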
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index 4ce2da7..687a2e7 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -26,7 +26,7 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TMP0]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[SPEC_SELECT]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[TMP12:%.*]] = sub nuw nsw i64 1, [[OFFSET_IDX]]
@@ -41,6 +41,7 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[VP_REVERSE]], ptr align 8 [[TMP19]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index c21847f..24649729 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -166,10 +166,11 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i32 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 9, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i8.nxv4p0(<vscale x 4 x i8> zeroinitializer, <vscale x 4 x ptr> align 1 [[BROADCAST_SPLAT2]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP6]])
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP6]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_EVL_NEXT]], 9
; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index b40d980..dfdc893 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -22,11 +22,12 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 9, [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 9, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0(<vscale x 8 x i8> zeroinitializer, <vscale x 8 x ptr> align 1 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP7]])
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 9
; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
index 46ecb19..6476373 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
@@ -33,7 +33,7 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[AVL:%.*]] = sub i64 [[TMP0]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[TMP0]], %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true)
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[EVL_BASED_IV]]
; CHECK-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr align 1 [[TMP13]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]]), !alias.scope [[META0:![0-9]+]]
@@ -47,6 +47,7 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count)
; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x ptr> align 2 zeroinitializer, <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]])
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 41b9636..568aa95 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -99,7 +99,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
@@ -108,6 +108,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
@@ -410,7 +411,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true)
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP7]], i64 0
; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT3]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
@@ -428,9 +429,10 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> [[PREDPHI]], ptr align 8 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP7]])
; TF-SCALABLE-NEXT: [[TMP15:%.*]] = zext i32 [[TMP7]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP15]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP15]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
-; TF-SCALABLE-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TF-SCALABLE-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
+; TF-SCALABLE-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -569,7 +571,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = load i64, ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP5]], i64 0
@@ -578,6 +580,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP7]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP6]])
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
@@ -707,13 +710,14 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
@@ -865,7 +869,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP13]]
@@ -876,6 +880,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT3]], ptr align 8 [[TMP10]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP11]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP11]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
@@ -1041,7 +1046,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: [[TMP11:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP11]]
@@ -1053,6 +1058,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT1]], ptr align 8 [[TMP12]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP9]])
; TF-SCALABLE-NEXT: [[TMP14:%.*]] = zext i32 [[TMP9]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP14]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP14]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
@@ -1194,13 +1200,14 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
; TF-SCALABLE: [[VECTOR_BODY]]:
; TF-SCALABLE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TF-SCALABLE-NEXT: [[AVL:%.*]] = sub i64 1025, [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL:%.*]] = phi i64 [ 1025, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr align 8 [[TMP6]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP5]])
; TF-SCALABLE-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
+; TF-SCALABLE-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], 1025
; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
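The TF-SCALABLE hunks above all make the same transformation: the available vector length (AVL), previously recomputed each iteration as sub i64 1025, %index, becomes a loop-carried phi seeded with the trip count and decremented by the number of elements consumed. A minimal self-contained sketch of the resulting IR shape, with illustrative names rather than the FileCheck captures, assuming trip count 1025 and VF = vscale x 2 as in the tests:

  declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)
  declare void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)

  ; %splat stands in for the broadcast of the uniform value.
  define void @avl_phi_sketch(ptr noalias %a, <vscale x 2 x i64> %splat) {
  entry:
    br label %vector.body

  vector.body:
    %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
    ; AVL is now a phi instead of being recomputed as "sub i64 1025, %index".
    %avl = phi i64 [ 1025, %entry ], [ %avl.next, %vector.body ]
    %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 2, i1 true)
    %gep = getelementptr inbounds i64, ptr %a, i64 %index
    call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> %splat, ptr align 8 %gep, <vscale x 2 x i1> splat (i1 true), i32 %evl)
    %evl.zext = zext i32 %evl to i64
    %index.next = add nuw i64 %evl.zext, %index
    ; EVL never exceeds the remaining AVL, so the decrement cannot wrap.
    %avl.next = sub nuw i64 %avl, %evl.zext
    %done = icmp eq i64 %index.next, 1025
    br i1 %done, label %middle.block, label %vector.body

  middle.block:
    ret void
  }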
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index 65fc18a..85116fe 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -23,7 +23,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
; IF-EVL: vector.body:
; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP11:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[EVL_BASED_IV]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP14]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
@@ -34,6 +34,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_OP]], ptr align 4 [[TMP18]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP12]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP12]] to i64
; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[AVL_NEXT]] = sub nuw i64 [[TMP11]], [[TMP20]]
; IF-EVL-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
; IF-EVL-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; IF-EVL: middle.block:
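The same rewrite lands in the IR-level test above with a runtime trip count %N. Both the nuw flag on the new decrement and the exact-equality exit compare rest on the LangRef contract of llvm.experimental.get.vector.length, namely that the result is positive while elements remain and never exceeds the remaining count. A sketch of the loop invariant under that assumption (notation mine):

  \[
  \mathrm{AVL}_0 = N, \qquad \mathrm{AVL}_{k+1} = \mathrm{AVL}_k - \mathrm{EVL}_k, \qquad 0 < \mathrm{EVL}_k \le \mathrm{AVL}_k \ \text{while } \mathrm{AVL}_k > 0
  \]

By induction every \(\mathrm{AVL}_k\) is non-negative, so the subtraction never wraps (licensing nuw), and the canonical index satisfies \(\mathrm{index}_k = N - \mathrm{AVL}_k\), so the test icmp eq %index.next, %N fires exactly when the AVL is exhausted.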
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
index 7f1066c..c058789 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
@@ -20,7 +20,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -35,6 +35,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMAX]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -76,7 +77,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -91,6 +92,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[SMIN]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -132,7 +134,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -147,6 +149,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMAX]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -188,7 +191,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -203,6 +206,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[UMIN]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -244,7 +248,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -256,6 +260,7 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTLZ]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -295,7 +300,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -307,6 +312,7 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[CTTZ]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -346,7 +352,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -360,6 +366,7 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -401,7 +408,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -415,6 +422,7 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -456,7 +464,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -468,6 +476,7 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ABS]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
index c1b656a..8d3fe48 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
@@ -19,7 +19,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -31,6 +31,7 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -72,7 +73,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -84,6 +85,7 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ZEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -123,7 +125,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -135,6 +137,7 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[TRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -174,7 +177,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -186,6 +189,7 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPEXT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -225,7 +229,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -237,6 +241,7 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTRUNC]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -276,7 +281,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -288,6 +293,7 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[SITOFP]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -327,7 +333,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -339,6 +345,7 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[UITOFP]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -378,7 +385,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -390,6 +397,7 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOSI]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -429,7 +437,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -441,6 +449,7 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[FPTOUI]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -480,7 +489,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -492,6 +501,7 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[INTTOPTR]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
@@ -532,7 +542,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: EMIT vp<[[INDEX:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[INDEX_NEXT:%.+]]>
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[INDEX_EVL:%.+]]> = phi ir<0>, vp<[[INDEX_EVL_NEXT:%.+]]>
; IF-EVL-NEXT: ir<[[IV:%.+]]> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[EVL]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<[[N]]>, vp<[[INDEX_EVL]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[INDEX_EVL]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: WIDEN-GEP Inv[Var] ir<[[GEP:%.+]]> = getelementptr inbounds ir<%b>, ir<[[IV]]>
@@ -542,6 +552,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[VECTOR_PTR]]>, ir<[[PTRTOINT]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[ZEXT:%.+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[INDEX_EVL_NEXT]]> = add vp<[[ZEXT]]>, vp<[[INDEX_EVL]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[ZEXT]]>
; IF-EVL-NEXT: EMIT vp<[[INDEX_NEXT]]> = add vp<[[INDEX]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[INDEX_NEXT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
index 9900602..ab4bb90 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
@@ -26,8 +26,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-NEXT: FIRST-ORDER-RECURRENCE-PHI ir<[[FOR_PHI:%.+]]> = phi ir<33>, ir<[[LD:%.+]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%TC>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[PREV_EVL:%.+]]> = phi [ vp<[[VF32]]>, vector.ph ], [ vp<[[EVL:%.+]]>, vector.body ]
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%TC>, vp<[[EVL_PHI]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds nuw ir<%A>, vp<[[ST]]>
@@ -40,6 +40,7 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR2]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
index 1c9554d..dff4971 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
@@ -40,7 +40,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-OUTLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-OUTLOOP-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX_PHI:%.+]]> = phi vp<[[RDX_START]]>, vp<[[RDX_SELECT:%.+]]>
-; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%n>, vp<[[EVL_PHI]]>
+; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%n>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-OUTLOOP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-OUTLOOP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
@@ -50,6 +50,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: WIDEN-INTRINSIC vp<[[RDX_SELECT]]> = call llvm.vp.merge(ir<true>, ir<[[ADD]]>, ir<[[RDX_PHI]]>, vp<[[EVL]]>)
; IF-EVL-OUTLOOP-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-OUTLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-OUTLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-OUTLOOP-NEXT: No successors
@@ -79,7 +80,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-INLOOP-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
; IF-EVL-INLOOP-NEXT: WIDEN-REDUCTION-PHI ir<[[RDX_PHI:%.+]]> = phi vp<[[RDX_START]]>, ir<[[RDX_NEXT:%.+]]>
-; IF-EVL-INLOOP-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%n>, vp<[[EVL_PHI]]>
+; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%n>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-INLOOP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-INLOOP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]>
@@ -88,6 +89,7 @@ define i32 @reduction(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: REDUCE ir<[[ADD:%.+]]> = ir<[[RDX_PHI]]> + vp.reduce.add (ir<[[LD1]]>, vp<[[EVL]]>)
; IF-EVL-INLOOP-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-INLOOP-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-INLOOP-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-INLOOP-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-INLOOP-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
index 42a846a..b3a611e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
@@ -24,7 +24,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: vector.body:
; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION
; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%.+]]>
-; IF-EVL-NEXT: EMIT vp<[[AVL:%.+]]> = sub ir<%N>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT-SCALAR vp<[[AVL:%.+]]> = phi [ ir<%N>, vector.ph ], [ vp<[[AVL_NEXT:%.+]]>, vector.body ]
; IF-EVL-NEXT: EMIT-SCALAR vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[AVL]]>
; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1>, vp<[[EVL]]>
; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]>
@@ -39,6 +39,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: WIDEN vp.store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[EVL]]>
; IF-EVL-NEXT: EMIT-SCALAR vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]>
+; IF-EVL-NEXT: EMIT vp<[[AVL_NEXT]]> = sub nuw vp<[[AVL]]>, vp<[[CAST]]>
; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%.+]]> = add vp<[[IV]]>, vp<[[VFUF]]>
; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]>
; IF-EVL-NEXT: No successors
diff --git a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
index e89f41b..97b5210 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/float-induction-x86.ll
@@ -142,40 +142,40 @@ define void @fp_iv_loop2(ptr noalias nocapture %A, i32 %N) {
; AUTO_VEC-NEXT: br i1 [[TMP0]], label [[FOR_END_LOOPEXIT_UNR_LCSSA:%.*]], label [[FOR_BODY_PREHEADER_NEW:%.*]]
; AUTO_VEC: for.body.preheader.new:
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[ZEXT]], 2147483640
-; AUTO_VEC-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 4
-; AUTO_VEC-NEXT: [[INVARIANT_GEP1:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 8
-; AUTO_VEC-NEXT: [[INVARIANT_GEP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 12
-; AUTO_VEC-NEXT: [[INVARIANT_GEP5:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16
-; AUTO_VEC-NEXT: [[INVARIANT_GEP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 20
-; AUTO_VEC-NEXT: [[INVARIANT_GEP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 24
-; AUTO_VEC-NEXT: [[INVARIANT_GEP11:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 28
; AUTO_VEC-NEXT: br label [[FOR_BODY:%.*]]
; AUTO_VEC: for.body:
; AUTO_VEC-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[INDVARS_IV_NEXT_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[X_06:%.*]] = phi float [ 1.000000e+00, [[FOR_BODY_PREHEADER_NEW]] ], [ [[CONV1_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER_NEW]] ], [ [[NITER_NEXT_7:%.*]], [[FOR_BODY]] ]
-; AUTO_VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A:%.*]], i64 [[INDVARS_IV]]
; AUTO_VEC-NEXT: store float [[X_06]], ptr [[ARRAYIDX]], align 4
; AUTO_VEC-NEXT: [[CONV1:%.*]] = fadd float [[X_06]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i64 4
; AUTO_VEC-NEXT: store float [[CONV1]], ptr [[ARRAYIDX_1]], align 4
; AUTO_VEC-NEXT: [[CONV1_1:%.*]] = fadd float [[CONV1]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP1]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 8
; AUTO_VEC-NEXT: store float [[CONV1_1]], ptr [[ARRAYIDX_2]], align 4
; AUTO_VEC-NEXT: [[CONV1_2:%.*]] = fadd float [[CONV1_1]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP3]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP3]], i64 12
; AUTO_VEC-NEXT: store float [[CONV1_2]], ptr [[ARRAYIDX_3]], align 4
; AUTO_VEC-NEXT: [[CONV1_3:%.*]] = fadd float [[CONV1_2]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP5]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP4]], i64 16
; AUTO_VEC-NEXT: store float [[CONV1_3]], ptr [[ARRAYIDX_4]], align 4
; AUTO_VEC-NEXT: [[CONV1_4:%.*]] = fadd float [[CONV1_3]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP7]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP5]], i64 20
; AUTO_VEC-NEXT: store float [[CONV1_4]], ptr [[ARRAYIDX_5]], align 4
; AUTO_VEC-NEXT: [[CONV1_5:%.*]] = fadd float [[CONV1_4]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP9]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 24
; AUTO_VEC-NEXT: store float [[CONV1_5]], ptr [[ARRAYIDX_6]], align 4
; AUTO_VEC-NEXT: [[CONV1_6:%.*]] = fadd float [[CONV1_5]], 5.000000e-01
-; AUTO_VEC-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, ptr [[INVARIANT_GEP11]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; AUTO_VEC-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i64 28
; AUTO_VEC-NEXT: store float [[CONV1_6]], ptr [[ARRAYIDX_7]], align 4
; AUTO_VEC-NEXT: [[CONV1_7]] = fadd float [[CONV1_6]], 5.000000e-01
; AUTO_VEC-NEXT: [[INDVARS_IV_NEXT_7]] = add nuw nsw i64 [[INDVARS_IV]], 8
@@ -299,40 +299,40 @@ define double @external_use_without_fast_math(ptr %a, i64 %n) {
; AUTO_VEC-NEXT: br i1 [[TMP0]], label [[FOR_END_UNR_LCSSA:%.*]], label [[ENTRY_NEW:%.*]]
; AUTO_VEC: entry.new:
; AUTO_VEC-NEXT: [[UNROLL_ITER:%.*]] = and i64 [[SMAX]], 9223372036854775800
-; AUTO_VEC-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 8
-; AUTO_VEC-NEXT: [[INVARIANT_GEP2:%.*]] = getelementptr i8, ptr [[A]], i64 16
-; AUTO_VEC-NEXT: [[INVARIANT_GEP4:%.*]] = getelementptr i8, ptr [[A]], i64 24
-; AUTO_VEC-NEXT: [[INVARIANT_GEP6:%.*]] = getelementptr i8, ptr [[A]], i64 32
-; AUTO_VEC-NEXT: [[INVARIANT_GEP8:%.*]] = getelementptr i8, ptr [[A]], i64 40
-; AUTO_VEC-NEXT: [[INVARIANT_GEP10:%.*]] = getelementptr i8, ptr [[A]], i64 48
-; AUTO_VEC-NEXT: [[INVARIANT_GEP12:%.*]] = getelementptr i8, ptr [[A]], i64 56
; AUTO_VEC-NEXT: br label [[FOR_BODY:%.*]]
; AUTO_VEC: for.body:
; AUTO_VEC-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY_NEW]] ], [ [[I_NEXT_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[J:%.*]] = phi double [ 0.000000e+00, [[ENTRY_NEW]] ], [ [[J_NEXT_7:%.*]], [[FOR_BODY]] ]
; AUTO_VEC-NEXT: [[NITER:%.*]] = phi i64 [ 0, [[ENTRY_NEW]] ], [ [[NITER_NEXT_7:%.*]], [[FOR_BODY]] ]
-; AUTO_VEC-NEXT: [[T0:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0:%.*]] = getelementptr double, ptr [[A:%.*]], i64 [[I]]
; AUTO_VEC-NEXT: store double [[J]], ptr [[T0]], align 8
; AUTO_VEC-NEXT: [[J_NEXT:%.*]] = fadd double [[J]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_1:%.*]] = getelementptr double, ptr [[INVARIANT_GEP]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_1:%.*]] = getelementptr i8, ptr [[TMP1]], i64 8
; AUTO_VEC-NEXT: store double [[J_NEXT]], ptr [[T0_1]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_1:%.*]] = fadd double [[J_NEXT]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_2:%.*]] = getelementptr double, ptr [[INVARIANT_GEP2]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_2:%.*]] = getelementptr i8, ptr [[TMP2]], i64 16
; AUTO_VEC-NEXT: store double [[J_NEXT_1]], ptr [[T0_2]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_2:%.*]] = fadd double [[J_NEXT_1]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_3:%.*]] = getelementptr double, ptr [[INVARIANT_GEP4]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_3:%.*]] = getelementptr i8, ptr [[TMP3]], i64 24
; AUTO_VEC-NEXT: store double [[J_NEXT_2]], ptr [[T0_3]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_3:%.*]] = fadd double [[J_NEXT_2]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_4:%.*]] = getelementptr double, ptr [[INVARIANT_GEP6]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_4:%.*]] = getelementptr i8, ptr [[TMP4]], i64 32
; AUTO_VEC-NEXT: store double [[J_NEXT_3]], ptr [[T0_4]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_4:%.*]] = fadd double [[J_NEXT_3]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_5:%.*]] = getelementptr double, ptr [[INVARIANT_GEP8]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_5:%.*]] = getelementptr i8, ptr [[TMP5]], i64 40
; AUTO_VEC-NEXT: store double [[J_NEXT_4]], ptr [[T0_5]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_5:%.*]] = fadd double [[J_NEXT_4]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_6:%.*]] = getelementptr double, ptr [[INVARIANT_GEP10]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_6:%.*]] = getelementptr i8, ptr [[TMP6]], i64 48
; AUTO_VEC-NEXT: store double [[J_NEXT_5]], ptr [[T0_6]], align 8
; AUTO_VEC-NEXT: [[J_NEXT_6:%.*]] = fadd double [[J_NEXT_5]], 3.000000e+00
-; AUTO_VEC-NEXT: [[T0_7:%.*]] = getelementptr double, ptr [[INVARIANT_GEP12]], i64 [[I]]
+; AUTO_VEC-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[A]], i64 [[I]]
+; AUTO_VEC-NEXT: [[T0_7:%.*]] = getelementptr i8, ptr [[TMP7]], i64 56
; AUTO_VEC-NEXT: store double [[J_NEXT_6]], ptr [[T0_7]], align 8
; AUTO_VEC-NEXT: [[I_NEXT_7]] = add nuw nsw i64 [[I]], 8
; AUTO_VEC-NEXT: [[J_NEXT_7]] = fadd double [[J_NEXT_6]], 3.000000e+00
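The float-induction-x86.ll hunks change addressing form rather than semantics: the hoisted loop-invariant byte-offset bases are gone, and each unrolled store instead recomputes the element GEP and applies its constant byte offset last. The two forms name the same address because the offsets are additive. A minimal illustration with element size 8, matching the external_use_without_fast_math hunks (function names hypothetical):

  define ptr @old_form(ptr %A, i64 %i) {
    %inv = getelementptr i8, ptr %A, i64 8        ; hoisted base: A + 8
    %p = getelementptr double, ptr %inv, i64 %i   ; (A + 8) + 8*i
    ret ptr %p
  }

  define ptr @new_form(ptr %A, i64 %i) {
    %t = getelementptr double, ptr %A, i64 %i     ; recomputed per iteration: A + 8*i
    %p = getelementptr i8, ptr %t, i64 8          ; (A + 8*i) + 8, same address
    ret ptr %p
  }

The fp_iv_loop2 hunks follow the same shape with float elements (byte offsets 4 through 28) and additionally carry inbounds nuw on both GEPs.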
diff --git a/llvm/test/Transforms/LoopVectorize/intrinsic.ll b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
index 10d83a4..9c910d7 100644
--- a/llvm/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
@@ -324,56 +324,6 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.exp2.f64(double)
-define void @ldexp_f32i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
-; CHECK-LABEL: @ldexp_f32i32(
-; CHECK: llvm.ldexp.v4f32.v4i32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call float @llvm.ldexp.f32.i32(float %0, i32 %exp)
- %arrayidx2 = getelementptr inbounds float, ptr %x, i32 %iv
- store float %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare float @llvm.ldexp.f32.i32(float, i32)
-
-define void @ldexp_f64i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
-; CHECK-LABEL: @ldexp_f64i32(
-; CHECK: llvm.ldexp.v4f64.v4i32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call double @llvm.ldexp.f64.i32(double %0, i32 %exp)
- %arrayidx2 = getelementptr inbounds double, ptr %x, i32 %iv
- store double %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare double @llvm.ldexp.f64.i32(double, i32)
-
define void @log_f32(i32 %n, ptr %y, ptr %x) {
; CHECK-LABEL: @log_f32(
; CHECK: llvm.log.v4f32
@@ -1026,157 +976,6 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.roundeven.f64(double)
-
-define void @lround_i32f32(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i32f32(
-; CHECK: llvm.lround.v4i32.v4f32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call i32 @llvm.lround.i32.f32(float %0)
- %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
- store i32 %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i32 @llvm.lround.i32.f32(float)
-
-define void @lround_i32f64(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i32f64(
-; CHECK: llvm.lround.v4i32.v4f64
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call i32 @llvm.lround.i32.f64(double %0)
- %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
- store i32 %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i32 @llvm.lround.i32.f64(double)
-
-define void @lround_i64f32(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i64f32(
-; CHECK: llvm.lround.v4i64.v4f32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call i64 @llvm.lround.i64.f32(float %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.lround.i64.f32(float)
-
-define void @lround_i64f64(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @lround_i64f64(
-; CHECK: llvm.lround.v4i64.v4f64
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call i64 @llvm.lround.i64.f64(double %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.lround.i64.f64(double)
-
-define void @llround_i64f32(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @llround_i64f32(
-; CHECK: llvm.llround.v4i64.v4f32
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
- %0 = load float, ptr %arrayidx, align 4
- %call = tail call i64 @llvm.llround.i64.f32(float %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 4
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.llround.i64.f32(float)
-
-define void @llround_i64f64(i32 %n, ptr %y, ptr %x) {
-; CHECK-LABEL: @llround_i64f64(
-; CHECK: llvm.llround.v4i64.v4f64
-; CHECK: ret void
-;
-entry:
- br label %for.body
-
-for.body: ; preds = %entry, %for.body
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
- %0 = load double, ptr %arrayidx, align 8
- %call = tail call i64 @llvm.llround.i64.f64(double %0)
- %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
- store i64 %call, ptr %arrayidx2, align 8
- %iv.next = add i32 %iv, 1
- %exitcond = icmp eq i32 %iv.next, %n
- br i1 %exitcond, label %for.end, label %for.body
-
-for.end: ; preds = %for.body, %entry
- ret void
-}
-
-declare i64 @llvm.llround.i64.f64(double)
-
define void @fma_f32(i32 %n, ptr %y, ptr %x, ptr %z, ptr %w) {
; CHECK-LABEL: @fma_f32(
; CHECK: llvm.fma.v4f32
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
index a85718d..4810952 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
@@ -289,7 +289,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
; CHECK-NEXT: [[TMP24:%.*]] = phi <4 x i32> [ [[TMP18]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP23]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP25:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP24]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP26:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP25]])
-; CHECK-NEXT: [[TMP27]] = and i32 [[TMP26]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP27]] = and i32 [[VEC_PHI]], [[TMP26]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -420,10 +420,10 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
; CHECK-NEXT: [[TMP40:%.*]] = phi <4 x i32> [ [[TMP30]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP38]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP39]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[TMP44:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP44]])
-; CHECK-NEXT: [[TMP46]] = add i32 [[TMP45]], [[TMP43]]
+; CHECK-NEXT: [[TMP46]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
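The reduction-inloop updates here and in the following file are operand-order canonicalization only: the scalar tail of each in-loop reduction now lists the accumulator phi as the first operand. Since add, mul, and and are commutative, the computed value is unchanged. A minimal sketch of one reduction step (names hypothetical):

  declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

  define i32 @rdx_step(i32 %vec.phi, <4 x i32> %masked.part) {
    %part = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %masked.part)
    ; was: add i32 %part, %vec.phi; add is commutative, so the result is identical
    %acc = add i32 %vec.phi, %part
    ret i32 %acc
  }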
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
index 795605d..755d7e2 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-pred.ll
@@ -55,7 +55,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26]] = add i32 [[TMP25]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP26]] = add i32 [[VEC_PHI]], [[TMP25]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -158,13 +158,13 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP37]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = add i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = add i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45:%.*]] = add i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45:%.*]] = add i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[TMP46:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP46]])
-; CHECK-NEXT: [[TMP48]] = add i32 [[TMP47]], [[TMP45]]
+; CHECK-NEXT: [[TMP48]] = add i32 [[TMP45]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -256,10 +256,10 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP23:%.*]] = phi <4 x i32> [ [[TMP17]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP22]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP24:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP23]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-NEXT: [[TMP26:%.*]] = add i32 [[TMP25]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP26:%.*]] = add i32 [[VEC_PHI]], [[TMP25]]
; CHECK-NEXT: [[TMP27:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> splat (i32 3), <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP27]])
-; CHECK-NEXT: [[TMP29]] = add i32 [[TMP28]], [[TMP26]]
+; CHECK-NEXT: [[TMP29]] = add i32 [[TMP26]], [[TMP28]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -363,13 +363,13 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP37]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45:%.*]] = mul i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45:%.*]] = mul i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[TMP46:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP47:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP46]])
-; CHECK-NEXT: [[TMP48]] = mul i32 [[TMP47]], [[TMP45]]
+; CHECK-NEXT: [[TMP48]] = mul i32 [[TMP45]], [[TMP47]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -478,11 +478,11 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP50:%.*]] = phi <4 x i32> [ [[TMP34]], [[PRED_LOAD_CONTINUE6]] ], [ [[TMP49]], [[PRED_LOAD_IF7]] ]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[VEC_IND1]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43:%.*]] = add i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[TMP40:%.*]] = mul nsw <4 x i32> [[TMP50]], [[TMP39]]
; CHECK-NEXT: [[TMP44:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP44]])
-; CHECK-NEXT: [[TMP46]] = add i32 [[TMP45]], [[TMP43]]
+; CHECK-NEXT: [[TMP46]] = add i32 [[TMP43]], [[TMP45]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <4 x i32> [[VEC_IND1]], splat (i32 4)
@@ -590,10 +590,10 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = mul i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = mul i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = mul i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -698,10 +698,10 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x i32> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP38]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = and i32 [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = and i32 [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP39]], <4 x i32> splat (i32 -1)
; CHECK-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = and i32 [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = and i32 [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -807,7 +807,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP40:%.*]] = add nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43]] = or i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43]] = or i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -913,7 +913,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP40:%.*]] = add nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP40]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP41]])
-; CHECK-NEXT: [[TMP43]] = xor i32 [[TMP42]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP43]] = xor i32 [[VEC_PHI]], [[TMP42]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
@@ -1124,10 +1124,10 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP39:%.*]] = phi <4 x float> [ [[TMP29]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP37]], [[PRED_LOAD_IF5]] ]
; CHECK-NEXT: [[TMP40:%.*]] = select fast <4 x i1> [[TMP0]], <4 x float> [[TMP38]], <4 x float> splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP41:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP40]])
-; CHECK-NEXT: [[TMP42:%.*]] = fmul fast float [[TMP41]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP42:%.*]] = fmul fast float [[VEC_PHI]], [[TMP41]]
; CHECK-NEXT: [[TMP43:%.*]] = select fast <4 x i1> [[TMP0]], <4 x float> [[TMP39]], <4 x float> splat (float 1.000000e+00)
; CHECK-NEXT: [[TMP44:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[TMP43]])
-; CHECK-NEXT: [[TMP45]] = fmul fast float [[TMP44]], [[TMP42]]
+; CHECK-NEXT: [[TMP45]] = fmul fast float [[TMP42]], [[TMP44]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 260
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
index 12d83eb..20b42c3 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-uf4.ll
@@ -24,13 +24,13 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5]] = add i32 [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP5]] = add i32 [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD4]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI1]], [[TMP6]]
; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD5]])
-; CHECK-NEXT: [[TMP9]] = add i32 [[TMP8]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP9]] = add i32 [[VEC_PHI2]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD6]])
-; CHECK-NEXT: [[TMP11]] = add i32 [[TMP10]], [[VEC_PHI3]]
+; CHECK-NEXT: [[TMP11]] = add i32 [[VEC_PHI3]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -96,21 +96,21 @@ define i64 @reduction_sum_chain(ptr noalias %p, ptr noalias %q) {
; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x i64>, ptr [[TMP6]], align 8
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i64>, ptr [[TMP7]], align 8
; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP8]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[VEC_PHI]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD4]])
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], [[VEC_PHI1]]
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[VEC_PHI1]], [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD5]])
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], [[VEC_PHI2]]
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[VEC_PHI2]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD6]])
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[VEC_PHI3]]
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[VEC_PHI3]], [[TMP14]]
; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD7]])
-; CHECK-NEXT: [[TMP17]] = add i64 [[TMP16]], [[TMP9]]
+; CHECK-NEXT: [[TMP17]] = add i64 [[TMP9]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD8]])
-; CHECK-NEXT: [[TMP19]] = add i64 [[TMP18]], [[TMP11]]
+; CHECK-NEXT: [[TMP19]] = add i64 [[TMP11]], [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD9]])
-; CHECK-NEXT: [[TMP21]] = add i64 [[TMP20]], [[TMP13]]
+; CHECK-NEXT: [[TMP21]] = add i64 [[TMP13]], [[TMP20]]
; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[WIDE_LOAD10]])
-; CHECK-NEXT: [[TMP23]] = add i64 [[TMP22]], [[TMP15]]
+; CHECK-NEXT: [[TMP23]] = add i64 [[TMP15]], [[TMP22]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -332,16 +332,16 @@ define i32 @predicated(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP98:%.*]] = phi <4 x i32> [ [[TMP92]], [[PRED_LOAD_CONTINUE34]] ], [ [[TMP97]], [[PRED_LOAD_IF35]] ]
; CHECK-NEXT: [[TMP99:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP26]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP100:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP99]])
-; CHECK-NEXT: [[TMP101]] = add i32 [[TMP100]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP101]] = add i32 [[VEC_PHI]], [[TMP100]]
; CHECK-NEXT: [[TMP102:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[TMP50]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP103:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP102]])
-; CHECK-NEXT: [[TMP104]] = add i32 [[TMP103]], [[VEC_PHI4]]
+; CHECK-NEXT: [[TMP104]] = add i32 [[VEC_PHI4]], [[TMP103]]
; CHECK-NEXT: [[TMP105:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP74]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP106:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP105]])
-; CHECK-NEXT: [[TMP107]] = add i32 [[TMP106]], [[VEC_PHI5]]
+; CHECK-NEXT: [[TMP107]] = add i32 [[VEC_PHI5]], [[TMP106]]
; CHECK-NEXT: [[TMP108:%.*]] = select <4 x i1> [[TMP3]], <4 x i32> [[TMP98]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP109:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP108]])
-; CHECK-NEXT: [[TMP110]] = add i32 [[TMP109]], [[VEC_PHI6]]
+; CHECK-NEXT: [[TMP110]] = add i32 [[VEC_PHI6]], [[TMP109]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP111:%.*]] = icmp eq i64 [[INDEX_NEXT]], 272
@@ -571,16 +571,16 @@ define i32 @cond_rdx_pred(i32 %cond, ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[TMP106:%.*]] = phi <4 x i32> [ [[TMP100]], [[PRED_LOAD_CONTINUE36]] ], [ [[TMP105]], [[PRED_LOAD_IF37]] ]
; CHECK-NEXT: [[TMP107:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP34]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP108:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP107]])
-; CHECK-NEXT: [[TMP109]] = mul i32 [[TMP108]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP109]] = mul i32 [[VEC_PHI]], [[TMP108]]
; CHECK-NEXT: [[TMP110:%.*]] = select <4 x i1> [[TMP9]], <4 x i32> [[TMP58]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP111:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP110]])
-; CHECK-NEXT: [[TMP112]] = mul i32 [[TMP111]], [[VEC_PHI4]]
+; CHECK-NEXT: [[TMP112]] = mul i32 [[VEC_PHI4]], [[TMP111]]
; CHECK-NEXT: [[TMP113:%.*]] = select <4 x i1> [[TMP10]], <4 x i32> [[TMP82]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP114:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP113]])
-; CHECK-NEXT: [[TMP115]] = mul i32 [[TMP114]], [[VEC_PHI5]]
+; CHECK-NEXT: [[TMP115]] = mul i32 [[VEC_PHI5]], [[TMP114]]
; CHECK-NEXT: [[TMP116:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> [[TMP106]], <4 x i32> splat (i32 1)
; CHECK-NEXT: [[TMP117:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[TMP116]])
-; CHECK-NEXT: [[TMP118]] = mul i32 [[TMP117]], [[VEC_PHI6]]
+; CHECK-NEXT: [[TMP118]] = mul i32 [[VEC_PHI6]], [[TMP117]]
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP119:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index b302868..0529d84 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -15,7 +15,7 @@ define i32 @reduction_sum_single(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP2]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -63,11 +63,11 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -118,7 +118,7 @@ define i32 @reduction_sum_const(ptr noalias nocapture %A) {
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[VEC_PHI]], [[TMP1]]
; CHECK-NEXT: [[TMP3]] = add i32 [[TMP2]], 12
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -168,11 +168,11 @@ define i32 @reduction_prod(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = mul i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = mul i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -226,10 +226,10 @@ define i32 @reduction_mix(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[B1:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP6]] = add i32 [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP6]] = add i32 [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -282,9 +282,9 @@ define i32 @reduction_mul(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = mul i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = mul i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
@@ -335,7 +335,7 @@ define i32 @start_at_non_zero(ptr nocapture %in, ptr nocapture %coeff, ptr nocap
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul nsw <4 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
@@ -385,9 +385,9 @@ define i32 @reduction_and(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = and i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = and i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
@@ -438,7 +438,7 @@ define i32 @reduction_or(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = or i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = or i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
@@ -489,7 +489,7 @@ define i32 @reduction_xor(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = xor i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = xor i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
@@ -589,9 +589,9 @@ define float @reduction_fmul(ptr nocapture %A, ptr nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = fmul fast float [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP5]] = fmul fast float [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5]] = fmul fast float [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
@@ -826,11 +826,11 @@ define i32 @reduction_predicated(ptr noalias nocapture %A, ptr noalias nocapture
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VEC_IND]])
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[VEC_PHI]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD1]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[TMP5]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -984,7 +984,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = fmul <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP2]])
-; CHECK-NEXT: [[TMP4]] = fadd float [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = fadd float [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
@@ -1127,7 +1127,7 @@ define float @reduction_fmuladd_blend(ptr %a, ptr %b, i64 %n, i1 %c) {
; CHECK-NEXT: [[TMP4:%.*]] = fmul <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[TMP4]], <4 x float> splat (float -0.000000e+00)
; CHECK-NEXT: [[TMP6:%.*]] = call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = fadd float [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = fadd float [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
@@ -1210,7 +1210,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
; CHECK-NEXT: [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], splat (i8 31)
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP7]] = add i32 [[VEC_PHI]], [[TMP6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
@@ -1294,10 +1294,10 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
; CHECK-NEXT: [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], splat (i8 31)
; CHECK-NEXT: [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP7]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[VEC_PHI]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP9]])
-; CHECK-NEXT: [[TMP11]] = add i32 [[TMP10]], [[TMP8]]
+; CHECK-NEXT: [[TMP11]] = add i32 [[TMP8]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
index be0e0d1..5c52b1a 100644
--- a/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/remarks-reduction-inloop.ll
@@ -18,7 +18,7 @@ define i32 @reduction_sum(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[WIDE_LOAD]])
-; CHECK-NEXT: [[TMP4]] = add i32 [[TMP3]], [[VEC_PHI]]
+; CHECK-NEXT: [[TMP4]] = add i32 [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
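All of the reduction-inloop hunks above make the same mechanical change: when LoopVectorize folds a per-iteration vector reduce into the scalar running total, the accumulator phi is now emitted as the first operand of the scalar binary op. A minimal free-standing sketch of the new pattern (hypothetical function, names, and trip count for illustration, not copied from any one test above):

; Sketch: in-loop add reduction with the accumulator-first operand order.
define i32 @sum(ptr %A) {
entry:
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi i32 [ 0, %entry ], [ %acc, %vector.body ]
  %gep = getelementptr inbounds i32, ptr %A, i64 %index
  %wide.load = load <4 x i32>, ptr %gep, align 4
  %part = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %wide.load)
  %acc = add i32 %vec.phi, %part        ; was: add i32 %part, %vec.phi
  %index.next = add nuw i64 %index, 4
  %done = icmp eq i64 %index.next, 256
  br i1 %done, label %exit, label %vector.body

exit:
  ret i32 %acc
}

declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

The swap is sound because every op being reordered in these tests (add, mul, and, or, xor, fadd, fmul) is commutative, so only the autogenerated CHECK expectations change, not the computed result.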
diff --git a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
index 4612545..f8bda1c 100644
--- a/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
+++ b/llvm/test/Transforms/LoopVectorize/reuse-lcssa-phi-scev-expansion.ll
@@ -389,22 +389,18 @@ define void @scev_exp_reuse_const_add(ptr %dst, ptr %src) {
; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; CHECK-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64
; CHECK-NEXT: br label %[[LOOP_1:.*]]
; CHECK: [[LOOP_1]]:
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], %[[LOOP_1]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[PTR_IV_1:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ], [ [[PTR_IV_1_NEXT:%.*]], %[[LOOP_1]] ]
; CHECK-NEXT: [[PTR_IV_1_NEXT]] = getelementptr i8, ptr [[PTR_IV_1]], i64 2
; CHECK-NEXT: [[C:%.*]] = call i1 @cond()
-; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[C]], label %[[LOOP_2_PH:.*]], label %[[LOOP_1]]
; CHECK: [[LOOP_2_PH]]:
-; CHECK-NEXT: [[INDVAR_LCSSA:%.*]] = phi i64 [ [[INDVAR]], %[[LOOP_1]] ]
; CHECK-NEXT: [[PTR_IV_1_NEXT_LCSSA:%.*]] = phi ptr [ [[PTR_IV_1_NEXT]], %[[LOOP_1]] ]
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
-; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[DST1]], [[SRC2]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDVAR_LCSSA]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 -2, [[SRC2]]
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[PTR_IV_1_NEXT_LCSSA]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
@@ -426,11 +422,11 @@ define void @scev_exp_reuse_const_add(ptr %dst, ptr %src) {
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 40, %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_2_PH]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[LOOP_2_PH]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[LOOP_2_PH]] ], [ [[PTR_IV_1_NEXT_LCSSA]], %[[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label %[[LOOP_2:.*]]
; CHECK: [[LOOP_2]]:
; CHECK-NEXT: [[IV_1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP_2]] ]
-; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2]] ]
+; CHECK-NEXT: [[PTR_IV_2:%.*]] = phi ptr [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[PTR_IV_2_NEXT:%.*]], %[[LOOP_2]] ]
; CHECK-NEXT: [[IV_2_NEXT]] = add i64 [[IV_1]], 1
; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV_2_NEXT]]
; CHECK-NEXT: [[L:%.*]] = load i16, ptr [[GEP_SRC_1]], align 2
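In the reuse-lcssa-phi-scev-expansion.ll hunk above, the runtime memcheck now derives the pointer distance directly from the loop-1 exit value (a ptrtoint of PTR_IV_1_NEXT_LCSSA) instead of re-deriving it from a dedicated counter, which is what allows the INDVAR phi, its increment, and its LCSSA copy to be deleted.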
diff --git a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
index af2b238..efd9f8b 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform_across_vf_induction1_and.ll
@@ -15,6 +15,13 @@ define void @ld_and_neg1_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = and i64 [[INDEX]], -1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <2 x i64> [[WIDE_LOAD]], splat (i64 42)
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[TMP2]], ptr [[TMP3]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -54,6 +61,10 @@ define void @ld_and_neg2_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -97,6 +108,11 @@ define void @ld_and_neg3_step1_start0_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP6]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <2 x i64> [[TMP8]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP10]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: scalar.ph:
@@ -351,6 +367,11 @@ define void @ld_and_neg2_step1_start1_ind1(ptr noalias %A, ptr noalias %B) {
; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP6]], i32 1
; CHECK-NEXT: [[TMP9:%.*]] = add nsw <2 x i64> [[TMP8]], splat (i64 42)
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x i64> [[TMP9]], ptr [[TMP10]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 998
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml b/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
index 5457e36..81df2f1 100644
--- a/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
+++ b/llvm/test/Transforms/LowerTypeTests/Inputs/exported-funcs.yaml
@@ -19,4 +19,12 @@ GlobalValueMap:
15859245615183425489: # guid("internal")
- Linkage: 7 # internal
Live: true
+ 1062103744896965210: # guid("alias1")
+ - Linkage: 4 # weak
+ Live: true
+ Aliasee: 16594175687743574550 # guid("external_addrtaken")
+ 2510616090736846890: # guid("alias2")
+ - Linkage: 0 # external
+ Live: true
+ Aliasee: 16594175687743574550 # guid("external_addrtaken")
...
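(For reference, Linkage in these summary entries holds LLVM's GlobalValue linkage enum value: 0 is external, 4 is weak, 7 is internal, consistent with the existing internal entry above. The new alias entries also gain an Aliasee field pointing at the GUID of external_addrtaken.)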
diff --git a/llvm/test/Transforms/LowerTypeTests/export-alias.ll b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
index 45b4db6..25d3483 100644
--- a/llvm/test/Transforms/LowerTypeTests/export-alias.ll
+++ b/llvm/test/Transforms/LowerTypeTests/export-alias.ll
@@ -1,21 +1,19 @@
; RUN: opt -S %s -passes=lowertypetests -lowertypetests-summary-action=export -lowertypetests-read-summary=%S/Inputs/exported-funcs.yaml | FileCheck %s
;
-; CHECK: @alias1 = weak alias [8 x i8], ptr @external_addrtaken
-; CHECK: @alias2 = hidden alias [8 x i8], ptr @external_addrtaken
+; CHECK: @alias1 = alias [8 x i8], ptr @external_addrtaken
+; CHECK: @alias2 = alias [8 x i8], ptr @external_addrtaken
; CHECK-NOT: @alias3 = alias
; CHECK-NOT: @not_present
target triple = "x86_64-unknown-linux"
-!cfi.functions = !{!0, !2, !3}
-!aliases = !{!4, !5, !6}
+!cfi.functions = !{!0, !2, !3, !4}
+!aliases = !{!5, !6}
!0 = !{!"external_addrtaken", i8 0, !1}
!1 = !{i64 0, !"typeid1"}
-!2 = !{!"alias1", i8 1, !1}
-; alias2 not included here, this could happen if the only reference to alias2
-; is in a module compiled without cfi-icall
-!3 = !{!"alias3", i8 1, !1}
-!4 = !{!"alias1", !"external_addrtaken", i8 0, i8 1}
-!5 = !{!"alias2", !"external_addrtaken", i8 1, i8 0}
-!6 = !{!"alias3", !"not_present", i8 0, i8 0}
+!2 = !{!"alias1", i8 0, !1}
+!3 = !{!"alias2", i8 0, !1}
+!4 = !{!"alias3", i8 0, !1}
+!5 = !{!"external_addrtaken", !"alias1", !"alias2"}
+!6 = !{!"not_present", !"alias3"}
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
index a13c36f..b932a69 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mean_q7.ll
@@ -41,7 +41,7 @@ define void @arm_mean_q7(ptr noundef %pSrc, i32 noundef %blockSize, ptr noundef
; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_MASKED_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = select <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i32> [[TMP4]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
-; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], [[SUM_0_LCSSA]]
+; CHECK-NEXT: [[TMP7:%.*]] = add i32 [[SUM_0_LCSSA]], [[TMP6]]
; CHECK-NEXT: br label [[WHILE_END5]]
; CHECK: while.end5:
; CHECK-NEXT: [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_0_LCSSA]], [[WHILE_END]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll b/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
index c98e7d3..482907d 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/pr88239.ll
@@ -8,12 +8,12 @@ define void @foo(ptr noalias noundef %0, ptr noalias noundef %1) optsize {
; CHECK-LABEL: define void @foo(
; CHECK-SAME: ptr noalias noundef readonly captures(none) [[TMP0:%.*]], ptr noalias noundef writeonly captures(none) [[TMP1:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: vector.ph:
-; CHECK-NEXT: [[INVARIANT_GEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 -28
; CHECK-NEXT: br label [[TMP4:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[TMP2:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[TMP4]] ]
; CHECK-NEXT: [[TMP3:%.*]] = sub nuw nsw i64 255, [[INDVARS_IV]]
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[INVARIANT_GEP]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP0]], i64 [[TMP3]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 -28
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = load <8 x i32>, ptr [[GEP]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_GATHER]], splat (i32 5)
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i32> [[TMP5]], <8 x i32> poison, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
diff --git a/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll b/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
index 19fbc1f..4455016 100644
--- a/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
+++ b/llvm/test/Transforms/PhaseOrdering/lower-table-based-cttz.ll
@@ -1,3 +1,6 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -O3 -S < %s | FileCheck %s
+
;; This tests lowering of the implementations of table-based ctz
;; algorithm to the llvm.cttz instruction in the -O3 case.
@@ -13,13 +16,17 @@
;; }
;; Compiled as: clang -O3 test.c -S -emit-llvm -Xclang -disable-llvm-optzns
-; RUN: opt -O3 -S < %s | FileCheck %s
-
-; CHECK: call range(i32 0, 33) i32 @llvm.cttz.i32
-
@ctz1.table = internal constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 16
-define i32 @ctz1(i32 noundef %x) {
+define i32 @ctz(i32 noundef %x) {
+; CHECK-LABEL: define range(i32 0, 32) i32 @ctz(
+; CHECK-SAME: i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: ret i32 [[CONV]]
+;
entry:
%x.addr = alloca i32, align 4
store i32 %x, ptr %x.addr, align 4
@@ -35,3 +42,28 @@ entry:
%conv = sext i8 %2 to i32
ret i32 %conv
}
+
+define i32 @ctz_nonarraygep(i32 noundef %x) {
+; CHECK-LABEL: define range(i32 0, 32) i32 @ctz_nonarraygep(
+; CHECK-SAME: i32 noundef [[X:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[X]], i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
+; CHECK-NEXT: [[CONV:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: ret i32 [[CONV]]
+;
+entry:
+ %x.addr = alloca i32, align 4
+ store i32 %x, ptr %x.addr, align 4
+ %0 = load i32, ptr %x.addr, align 4
+ %1 = load i32, ptr %x.addr, align 4
+ %sub = sub i32 0, %1
+ %and = and i32 %0, %sub
+ %mul = mul i32 %and, 125613361
+ %shr = lshr i32 %mul, 27
+ %idxprom = zext i32 %shr to i64
+ %arrayidx = getelementptr inbounds i8, ptr @ctz1.table, i64 %idxprom
+ %2 = load i8, ptr %arrayidx, align 1
+ %conv = sext i8 %2 to i32
+ ret i32 %conv
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
deleted file mode 100644
index 301e5da..0000000
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
+++ /dev/null
@@ -1,279 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
-
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-
-define void @ldexp_f32i32(ptr %x, ptr %y, i32 %exp) {
-; CHECK-LABEL: @ldexp_f32i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L4]], i32 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L6]], i32 [[EXP]])
-; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
-; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
-; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
-; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp)
- %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp)
- %l5 = tail call float @llvm.ldexp.f32.i32(float %l4, i32 %exp)
- %l7 = tail call float @llvm.ldexp.f32.i32(float %l6, i32 %exp)
- store float %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
- store float %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
- store float %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
- store float %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f64i32(ptr %x, ptr %y, i32 %exp) {
-; CHECK-LABEL: @ldexp_f64i32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L4]], i32 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L6]], i32 [[EXP]])
-; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
-; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
-; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
-; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp)
- %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp)
- %l5 = tail call double @llvm.ldexp.f64.i32(double %l4, i32 %exp)
- %l7 = tail call double @llvm.ldexp.f64.i32(double %l6, i32 %exp)
- store double %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
- store double %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
- store double %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
- store double %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f32i64(ptr %x, ptr %y, i64 %exp) {
-; CHECK-LABEL: @ldexp_f32i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L0]], i64 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L2]], i64 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP]])
-; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
-; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
-; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
-; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call float @llvm.ldexp.f32.i64(float %l0, i64 %exp)
- %l3 = tail call float @llvm.ldexp.f32.i64(float %l2, i64 %exp)
- %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp)
- %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp)
- store float %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
- store float %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
- store float %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
- store float %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f64i64(ptr %x, ptr %y, i64 %exp) {
-; CHECK-LABEL: @ldexp_f64i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L0]], i64 [[EXP:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L2]], i64 [[EXP]])
-; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP]])
-; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP]])
-; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
-; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
-; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
-; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call double @llvm.ldexp.f64.i64(double %l0, i64 %exp)
- %l3 = tail call double @llvm.ldexp.f64.i64(double %l2, i64 %exp)
- %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp)
- %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp)
- store double %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
- store double %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
- store double %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
- store double %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f32i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
-; CHECK-LABEL: @ldexp_f32i32_i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP32:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP32]])
-; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP64:%.*]])
-; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP64]])
-; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
-; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
-; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
-; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp32)
- %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp32)
- %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp64)
- %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp64)
- store float %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
- store float %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
- store float %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
- store float %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @ldexp_f64_i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
-; CHECK-LABEL: @ldexp_f64_i32_i64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP32:%.*]])
-; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP32]])
-; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP64:%.*]])
-; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP64]])
-; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
-; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
-; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
-; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp32)
- %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp32)
- %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp64)
- %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp64)
- store double %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
- store double %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
- store double %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
- store double %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-declare float @llvm.ldexp.f32.i32(float, i32)
-declare double @llvm.ldexp.f64.i32(double, i32)
-declare float @llvm.ldexp.f32.i64(float, i64)
-declare double @llvm.ldexp.f64.i64(double, i64)
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
deleted file mode 100644
index 07a3fe7..0000000
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
+++ /dev/null
@@ -1,280 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
-
-target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
-
-define void @lround_i32f32(ptr %x, ptr %y, i32 %n) {
-; CHECK-LABEL: @lround_i32f32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L6]])
-; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
-; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
-; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
-; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call i32 @llvm.lround.i32.f32(float %l0)
- %l3 = tail call i32 @llvm.lround.i32.f32(float %l2)
- %l5 = tail call i32 @llvm.lround.i32.f32(float %l4)
- %l7 = tail call i32 @llvm.lround.i32.f32(float %l6)
- store i32 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
- store i32 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
- store i32 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
- store i32 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @lround_i32f64(ptr %x, ptr %y, i32 %n) {
-; CHECK-LABEL: @lround_i32f64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L6]])
-; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
-; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
-; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
-; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call i32 @llvm.lround.i32.f64(double %l0)
- %l3 = tail call i32 @llvm.lround.i32.f64(double %l2)
- %l5 = tail call i32 @llvm.lround.i32.f64(double %l4)
- %l7 = tail call i32 @llvm.lround.i32.f64(double %l6)
- store i32 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
- store i32 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
- store i32 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
- store i32 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @lround_i64f32(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @lround_i64f32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.lround.i64.f32(float %l0)
- %l3 = tail call i64 @llvm.lround.i64.f32(float %l2)
- %l5 = tail call i64 @llvm.lround.i64.f32(float %l4)
- %l7 = tail call i64 @llvm.lround.i64.f32(float %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @lround_i64f64(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @lround_i64f64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.lround.i64.f64(double %l0)
- %l3 = tail call i64 @llvm.lround.i64.f64(double %l2)
- %l5 = tail call i64 @llvm.lround.i64.f64(double %l4)
- %l7 = tail call i64 @llvm.lround.i64.f64(double %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @llround_i64f32(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @llround_i64f32(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load float, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
- %l2 = load float, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
- %l4 = load float, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
- %l6 = load float, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.llround.i64.f32(float %l0)
- %l3 = tail call i64 @llvm.llround.i64.f32(float %l2)
- %l5 = tail call i64 @llvm.llround.i64.f32(float %l4)
- %l7 = tail call i64 @llvm.llround.i64.f32(float %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-define void @llround_i64f64(ptr %x, ptr %y, i64 %n) {
-; CHECK-LABEL: @llround_i64f64(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
-; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
-; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
-; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
-; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L0]])
-; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L2]])
-; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L4]])
-; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L6]])
-; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
-; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
-; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
-; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
-; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %l0 = load double, ptr %x, align 4
- %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
- %l2 = load double, ptr %arrayidx.1, align 4
- %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
- %l4 = load double, ptr %arrayidx.2, align 4
- %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
- %l6 = load double, ptr %arrayidx.3, align 4
- %l1 = tail call i64 @llvm.llround.i64.f64(double %l0)
- %l3 = tail call i64 @llvm.llround.i64.f64(double %l2)
- %l5 = tail call i64 @llvm.llround.i64.f64(double %l4)
- %l7 = tail call i64 @llvm.llround.i64.f64(double %l6)
- store i64 %l1, ptr %y, align 4
- %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
- store i64 %l3, ptr %arrayidx2.1, align 4
- %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
- store i64 %l5, ptr %arrayidx2.2, align 4
- %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
- store i64 %l7, ptr %arrayidx2.3, align 4
- ret void
-}
-
-declare i32 @llvm.lround.i32.f32(float)
-declare i64 @llvm.lround.i64.f32(float)
-declare i64 @llvm.lround.i64.f64(double)
-declare i64 @llvm.llround.i64.f32(float)
-declare i64 @llvm.llround.i64.f64(double)
diff --git a/llvm/test/Transforms/Scalarizer/intrinsics.ll b/llvm/test/Transforms/Scalarizer/intrinsics.ll
index 070c765..cee44ef 100644
--- a/llvm/test/Transforms/Scalarizer/intrinsics.ll
+++ b/llvm/test/Transforms/Scalarizer/intrinsics.ll
@@ -8,7 +8,6 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>)
-declare <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>)
; Ternary fp
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
@@ -33,8 +32,6 @@ declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>)
; Unary fp operand, int return type
declare <2 x i32> @llvm.lrint.v2i32.v2f32(<2 x float>)
declare <2 x i32> @llvm.llrint.v2i32.v2f32(<2 x float>)
-declare <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float>)
-declare <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float>)
; Bool return type, overloaded on fp operand type
declare <2 x i1> @llvm.is.fpclass(<2 x float>, i32)
@@ -162,22 +159,6 @@ define <2 x float> @scalarize_powi_v2f32(<2 x float> %x, i32 %y) #0 {
ret <2 x float> %powi
}
-define <2 x float> @scalarize_ldexp_v2f32(<2 x float> %x, <2 x i32> %y) #0 {
-; CHECK-LABEL: @scalarize_ldexp_v2f32(
-; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
-; CHECK-NEXT: [[Y:%.*]] = extractelement <2 x i32> [[Y1:%.*]], i64 0
-; CHECK-NEXT: [[POWI_I0:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I0]], i32 [[Y]])
-; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
-; CHECK-NEXT: [[Y_I1:%.*]] = extractelement <2 x i32> [[Y1]], i64 1
-; CHECK-NEXT: [[POWI_I1:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I1]], i32 [[Y_I1]])
-; CHECK-NEXT: [[POWI_UPTO0:%.*]] = insertelement <2 x float> poison, float [[POWI_I0]], i64 0
-; CHECK-NEXT: [[POWI:%.*]] = insertelement <2 x float> [[POWI_UPTO0]], float [[POWI_I1]], i64 1
-; CHECK-NEXT: ret <2 x float> [[POWI]]
-;
- %powi = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> %y)
- ret <2 x float> %powi
-}
-
define <2 x i32> @scalarize_smul_fix_sat_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: @scalarize_smul_fix_sat_v2i32(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
@@ -262,34 +243,6 @@ define <2 x i32> @scalarize_llrint(<2 x float> %x) #0 {
ret <2 x i32> %rnd
}
-define <2 x i32> @scalarize_lround(<2 x float> %x) #0 {
-; CHECK-LABEL: @scalarize_lround(
-; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
-; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I0]])
-; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
-; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I1]])
-; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
-; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
-; CHECK-NEXT: ret <2 x i32> [[RND]]
-;
- %rnd = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> %x)
- ret <2 x i32> %rnd
-}
-
-define <2 x i32> @scalarize_llround(<2 x float> %x) #0 {
-; CHECK-LABEL: @scalarize_llround(
-; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
-; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I0]])
-; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
-; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I1]])
-; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
-; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
-; CHECK-NEXT: ret <2 x i32> [[RND]]
-;
- %rnd = call <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float> %x)
- ret <2 x i32> %rnd
-}
-
define <2 x i1> @scalarize_is_fpclass(<2 x float> %x) #0 {
; CHECK-LABEL: @scalarize_is_fpclass(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll
index b309682..2fad306c5 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-or-as-add.ll
@@ -47,10 +47,8 @@ define void @testDisjointOrSplits(ptr %p) {
; CHECK-LABEL: define void @testDisjointOrSplits(
; CHECK-SAME: ptr [[P:%.*]]) {
; CHECK-NEXT: [[VAR:%.*]] = tail call i64 @foo()
-; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP1]], [[VAR]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[TMP2]], 10
-; CHECK-NEXT: [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[P]], i64 [[VAR]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[UGLYGEP]], i64 10
; CHECK-NEXT: store i8 0, ptr [[TMP4]], align 1
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll
index b0e88ef..a6b38bc 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/split-gep-sub.ll
@@ -31,11 +31,9 @@ define void @test_A_sub_B_add_ConstantInt(ptr %p) {
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[MUL]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[REM]] to i64
; CHECK-NEXT: [[SUB22:%.*]] = sub i64 [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[SUB22]], 2
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 2044
-; CHECK-NEXT: [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 2044
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[UGLYGEP]], i64 [[TMP4]]
; CHECK-NEXT: store float 1.000000e+00, ptr [[TMP7]], align 4
; CHECK-NEXT: br label [[COND_END]]
; CHECK: cond.end:
diff --git a/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll b/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll
new file mode 100644
index 0000000..32b7719
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/jump-threading-live-on-exit.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=simplifycfg -S < %s | FileCheck %s
+
+; Allow jump-threading even when values defined in a block are live outside of
+; that block, as long as the threaded destinations do not use those values
+; (i.e. the values are dead there).
+
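+; A minimal sketch of the rewrite this enables (block names refer to @testA
+; below; the "before" shape is the input IR and the "after" shape is what the
+; CHECK lines verify):
+;
+;   before: mainA -> {ifA, mainB}, mainB -> {ifB, mainC}  (both branch on %cond)
+;   after:  mainA -> {ifA, mainC}, with ifB's store sunk into ifA
+;
+; Both branches test the same %cond, so mainB's conditional branch can be
+; threaded away.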
+define void @testA(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b) {
+; CHECK-LABEL: define void @testA(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: store i64 [[TMP0]], ptr [[PTRB]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; %value is live outside of block mainB, but jump-threading to destination
+; mainC can still occur, since %value is dead there. Subsequent CFG
+; simplification then folds the remaining blocks into a single if block.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ store i64 %value, ptr %ptrB
+ br label %mainC
+
+mainC:
+ ret void
+}
+
+
+define void @testB(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: define void @testB(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*:]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK-NEXT: [[PTRC:%.*]] = select i1 [[COND2]], ptr [[PTR_ARM1]], ptr [[PTR_ARM2]]
+; CHECK-NEXT: store i64 [[TMP0]], ptr [[PTRC]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; The use of %value is not in either immediate successor of mainB; it is only
+; reached through the ifB diamond.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ ret void
+}
+
+
+; Jump-threading is not done since %value is live in both destinations.
+define void @testA_negative(ptr %ptrA, ptr %ptrB, ptr %ptrD, i64 %a, i64 %b) {
+; CHECK-LABEL: define void @testA_negative(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], ptr [[PTRD:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*]]:
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: br label %[[MAINB]]
+; CHECK: [[MAINB]]:
+; CHECK-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFB]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRB]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRD]], align 4
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ store i64 %value, ptr %ptrB
+ br label %mainC
+
+mainC:
+ store i64 %value, ptr %ptrD
+ ret void
+}
+
+
+; Jump-threading is not done since %value is live in both destinations.
+define void @testB_negative(ptr %ptrA, ptr %ptrB, ptr %ptrD, i64 %a, i64 %b, i64 %c) {
+; CHECK-LABEL: define void @testB_negative(
+; CHECK-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], ptr [[PTRD:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK-NEXT: [[MAINA:.*]]:
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK: [[IFA]]:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK-NEXT: br label %[[MAINB]]
+; CHECK: [[MAINB]]:
+; CHECK-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK: [[IFB]]:
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK-NEXT: [[PTRC:%.*]] = select i1 [[COND2]], ptr [[PTR_ARM1]], ptr [[PTR_ARM2]]
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRC]], align 4
+; CHECK-NEXT: br label %[[MAINC]]
+; CHECK: [[MAINC]]:
+; CHECK-NEXT: store i64 [[VALUE]], ptr [[PTRD]], align 4
+; CHECK-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ store i64 %value, ptr %ptrD
+ ret void
+}
+
diff --git a/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll b/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll
new file mode 100644
index 0000000..6868693
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/jump-threading-max-jump-threading-live-blocks.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=simplifycfg -S -max-jump-threading-live-blocks=3 < %s | FileCheck %s --check-prefixes=CHECK_LIMIT_3
+; RUN: opt -passes=simplifycfg -S -max-jump-threading-live-blocks=4 < %s | FileCheck %s --check-prefixes=CHECK_LIMIT_4
+
+; Test the option -max-jump-threading-live-blocks=<num>
+
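+; With -max-jump-threading-live-blocks=3 (CHECK_LIMIT_3) the CFG below is kept
+; as-is, while a limit of 4 (CHECK_LIMIT_4) allows the threading: mainB and the
+; ifB diamond are folded into ifA and the phi for %value disappears. The limit
+; presumably bounds how many blocks the liveness scan for threaded values may
+; visit; the exact counting is an assumption here, but the observable
+; difference is what the two CHECK prefixes show.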
+define void @testB(ptr %ptrA, ptr %ptrB, i64 %a, i64 %b, i64 %c) {
+; CHECK_LIMIT_3-LABEL: define void @testB(
+; CHECK_LIMIT_3-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK_LIMIT_3-NEXT: [[MAINA:.*]]:
+; CHECK_LIMIT_3-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINB:.*]]
+; CHECK_LIMIT_3: [[IFA]]:
+; CHECK_LIMIT_3-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[MAINB]]
+; CHECK_LIMIT_3: [[MAINB]]:
+; CHECK_LIMIT_3-NEXT: [[VALUE:%.*]] = phi i64 [ [[TMP0]], %[[IFA]] ], [ 0, %[[MAINA]] ]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND]], label %[[IFB:.*]], label %[[MAINC:.*]]
+; CHECK_LIMIT_3: [[IFB]]:
+; CHECK_LIMIT_3-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK_LIMIT_3-NEXT: br i1 [[COND2]], label %[[IFB_ARM1:.*]], label %[[IFB_ARM2:.*]]
+; CHECK_LIMIT_3: [[IFB_ARM1]]:
+; CHECK_LIMIT_3-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK_LIMIT_3-NEXT: store i128 0, ptr [[PTR_ARM1]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[IFB_JOIN:.*]]
+; CHECK_LIMIT_3: [[IFB_ARM2]]:
+; CHECK_LIMIT_3-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK_LIMIT_3-NEXT: store i128 0, ptr [[PTR_ARM2]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[IFB_JOIN]]
+; CHECK_LIMIT_3: [[IFB_JOIN]]:
+; CHECK_LIMIT_3-NEXT: [[PTRC:%.*]] = phi ptr [ [[PTR_ARM1]], %[[IFB_ARM1]] ], [ [[PTR_ARM2]], %[[IFB_ARM2]] ]
+; CHECK_LIMIT_3-NEXT: store i64 [[VALUE]], ptr [[PTRC]], align 4
+; CHECK_LIMIT_3-NEXT: br label %[[MAINC]]
+; CHECK_LIMIT_3: [[MAINC]]:
+; CHECK_LIMIT_3-NEXT: ret void
+;
+; CHECK_LIMIT_4-LABEL: define void @testB(
+; CHECK_LIMIT_4-SAME: ptr [[PTRA:%.*]], ptr [[PTRB:%.*]], i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) {
+; CHECK_LIMIT_4-NEXT: [[MAINA:.*:]]
+; CHECK_LIMIT_4-NEXT: [[COND:%.*]] = icmp slt i64 [[A]], [[B]]
+; CHECK_LIMIT_4-NEXT: br i1 [[COND]], label %[[IFA:.*]], label %[[MAINC:.*]]
+; CHECK_LIMIT_4: [[IFA]]:
+; CHECK_LIMIT_4-NEXT: [[TMP0:%.*]] = load i64, ptr [[PTRA]], align 4
+; CHECK_LIMIT_4-NEXT: [[COND2:%.*]] = icmp slt i64 [[A]], [[C]]
+; CHECK_LIMIT_4-NEXT: br i1 [[COND2]], label %[[IFB_ARM1:.*]], label %[[IFB_ARM2:.*]]
+; CHECK_LIMIT_4: [[IFB_ARM1]]:
+; CHECK_LIMIT_4-NEXT: [[PTR_ARM1:%.*]] = getelementptr i64, ptr [[PTRB]], i64 8
+; CHECK_LIMIT_4-NEXT: store i128 0, ptr [[PTR_ARM1]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[IFB_JOIN:.*]]
+; CHECK_LIMIT_4: [[IFB_ARM2]]:
+; CHECK_LIMIT_4-NEXT: [[PTR_ARM2:%.*]] = getelementptr i64, ptr [[PTRB]], i64 16
+; CHECK_LIMIT_4-NEXT: store i128 0, ptr [[PTR_ARM2]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[IFB_JOIN]]
+; CHECK_LIMIT_4: [[IFB_JOIN]]:
+; CHECK_LIMIT_4-NEXT: [[PTRC:%.*]] = phi ptr [ [[PTR_ARM1]], %[[IFB_ARM1]] ], [ [[PTR_ARM2]], %[[IFB_ARM2]] ]
+; CHECK_LIMIT_4-NEXT: store i64 [[TMP0]], ptr [[PTRC]], align 4
+; CHECK_LIMIT_4-NEXT: br label %[[MAINC]]
+; CHECK_LIMIT_4: [[MAINC]]:
+; CHECK_LIMIT_4-NEXT: ret void
+;
+mainA:
+ %cond = icmp slt i64 %a, %b
+ br i1 %cond, label %ifA, label %mainB
+
+ifA:
+ %518 = load i64, ptr %ptrA
+ br label %mainB
+
+; The use of %value is not in either immediate successor of mainB; it is only
+; reached through the ifB diamond.
+mainB:
+ %value = phi i64 [ %518, %ifA ], [ zeroinitializer, %mainA ]
+ br i1 %cond, label %ifB, label %mainC
+
+ifB:
+ %cond2 = icmp slt i64 %a, %c
+ br i1 %cond2, label %ifB_arm1, label %ifB_arm2
+
+ifB_arm1:
+ %ptr_arm1 = getelementptr i64, ptr %ptrB, i64 8
+ store i128 0, ptr %ptr_arm1
+ br label %ifB_join
+
+ifB_arm2:
+ %ptr_arm2 = getelementptr i64, ptr %ptrB, i64 16
+ store i128 0, ptr %ptr_arm2
+ br label %ifB_join
+
+ifB_join:
+ %ptrC = phi ptr [ %ptr_arm1, %ifB_arm1 ], [ %ptr_arm2, %ifB_arm2 ]
+ store i64 %value, ptr %ptrC
+ br label %mainC
+
+mainC:
+ ret void
+}
diff --git a/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll b/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
index efc04e9..74693c1 100644
--- a/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
+++ b/llvm/test/Transforms/ThinLTOBitcodeWriter/function-alias.ll
@@ -7,11 +7,16 @@ define hidden void @Func() !type !0 {
ret void
}
-; CHECK1: !aliases = !{![[A1:[0-9]+]], ![[A2:[0-9]+]], ![[A3:[0-9]+]]}
+; CHECK1: !cfi.functions = !{![[F1:[0-9]+]], ![[F2:[0-9]+]], ![[F3:[0-9]+]], ![[F4:[0-9]+]]}
+; CHECK1: !aliases = !{![[A:[0-9]+]]}
-; CHECK1: ![[A1]] = !{!"Alias", !"Func", i8 1, i8 0}
-; CHECK1: ![[A2]] = !{!"Hidden_Alias", !"Func", i8 1, i8 0}
-; CHECK1: ![[A3]] = !{!"Weak_Alias", !"Func", i8 0, i8 1}
+; CHECK1: ![[F1]] = !{!"Func", i8 0, ![[T:[0-9]+]]}
+; CHECK1: ![[T]] = !{i64 0, !"_ZTSFvvE"}
+; CHECK1: ![[F2]] = !{!"Alias", i8 0, ![[T]]}
+; CHECK1: ![[F3]] = !{!"Hidden_Alias", i8 0, ![[T]]}
+; CHECK1: ![[F4]] = !{!"Weak_Alias", i8 0, ![[T]]}
+;
+; CHECK1: ![[A]] = !{!"Func", !"Alias", !"Hidden_Alias", !"Weak_Alias"}
@Alias = hidden alias void (), ptr @Func
@Hidden_Alias = hidden alias void (), ptr @Func
@Weak_Alias = weak alias void (), ptr @Func
diff --git a/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg b/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg
new file mode 100644
index 0000000..78dd74c
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/SPIRV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "SPIRV" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll b/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll
new file mode 100644
index 0000000..6f4c80d
--- /dev/null
+++ b/llvm/test/Transforms/VectorCombine/SPIRV/load-insert-store.ll
@@ -0,0 +1,889 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=vector-combine -data-layout=E -mtriple=spirv-unknown-vulkan1.3-library %s | FileCheck %s --check-prefix=SPIRV
+
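+; Note: in the SPIRV output captured below, the vector load / insertelement /
+; store sequences appear to stay in vector form, so the per-test comments
+; describe the scenario being probed rather than a rewrite that fires on this
+; triple.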
+define void @insert_store(ptr %q, i8 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 3
+ store <16 x i8> %vecins, ptr %q, align 16
+ ret void
+}
+
+define void @insert_store_i16_align1(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_i16_align1(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 3
+ store <8 x i16> %vecins, ptr %q, align 1
+ ret void
+}
+
+; To verify the case where the index is out of bounds.
+define void @insert_store_outofbounds(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_outofbounds(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 9
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 9
+ store <8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_vscale(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_vscale(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <vscale x 8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 8 x i16>, ptr %q
+ %vecins = insertelement <vscale x 8 x i16> %0, i16 %s, i32 3
+ store <vscale x 8 x i16> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case where the index exceeds the minimum number of elements
+; of a scalable vector type.
+define void @insert_store_vscale_exceeds(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_vscale_exceeds(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 8 x i16> [[TMP0]], i16 [[S]], i32 9
+; SPIRV-NEXT: store <vscale x 8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 8 x i16>, ptr %q
+ %vecins = insertelement <vscale x 8 x i16> %0, i16 %s, i32 9
+ store <vscale x 8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_v9i4(ptr %q, i4 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_v9i4(
+; SPIRV-SAME: ptr [[Q:%.*]], i4 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <9 x i4>, ptr [[Q]], align 8
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <9 x i4> [[TMP0]], i4 [[S]], i32 3
+; SPIRV-NEXT: store <9 x i4> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <9 x i4>, ptr %q
+ %vecins = insertelement <9 x i4> %0, i4 %s, i32 3
+ store <9 x i4> %vecins, ptr %q, align 1
+ ret void
+}
+
+define void @insert_store_v4i27(ptr %q, i27 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_v4i27(
+; SPIRV-SAME: ptr [[Q:%.*]], i27 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <4 x i27>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <4 x i27> [[TMP0]], i27 [[S]], i32 3
+; SPIRV-NEXT: store <4 x i27> [[VECINS]], ptr [[Q]], align 1
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <4 x i27>, ptr %q
+ %vecins = insertelement <4 x i27> %0, i27 %s, i32 3
+ store <4 x i27> %vecins, ptr %q, align 1
+ ret void
+}
+
+define void @insert_store_v32i1(ptr %p) {
+; SPIRV-LABEL: define void @insert_store_v32i1(
+; SPIRV-SAME: ptr [[P:%.*]]) {
+; SPIRV-NEXT: [[VEC:%.*]] = load <32 x i1>, ptr [[P]], align 4
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <32 x i1> [[VEC]], i1 true, i64 0
+; SPIRV-NEXT: store <32 x i1> [[INS]], ptr [[P]], align 4
+; SPIRV-NEXT: ret void
+;
+ %vec = load <32 x i1>, ptr %p
+ %ins = insertelement <32 x i1> %vec, i1 true, i64 0
+ store <32 x i1> %ins, ptr %p
+ ret void
+}
+
+define void @insert_store_blk_differ(ptr %q, i16 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_blk_differ(
+; SPIRV-SAME: ptr [[Q:%.*]], i16 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <8 x i16>, ptr [[Q]], align 16
+; SPIRV-NEXT: br label %[[CONT:.*]]
+; SPIRV: [[CONT]]:
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[S]], i32 3
+; SPIRV-NEXT: store <8 x i16> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <8 x i16>, ptr %q
+ br label %cont
+cont:
+ %vecins = insertelement <8 x i16> %0, i16 %s, i32 3
+ store <8 x i16> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case where the index is not a constant and the vector type
+; is scalable.
+define void @insert_store_vscale_nonconst(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify that the alignment here is narrowed to the scalar store size.
+define void @insert_store_nonconst_large_alignment(ptr %q, i32 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_large_alignment(
+; SPIRV-SAME: ptr [[Q:%.*]], i32 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <4 x i32>, ptr [[Q]], align 128
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <4 x i32> [[I]], i32 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <4 x i32> [[VECINS]], ptr [[Q]], align 128
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %i = load <4 x i32>, ptr %q, align 128
+ %vecins = insertelement <4 x i32> %i, i32 %s, i32 %idx
+ store <4 x i32> %vecins, ptr %q, align 128
+ ret void
+}
+
+define void @insert_store_nonconst_align_maximum_8(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_maximum_8(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 8
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 8
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 8
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 8
+ ret void
+}
+
+define void @insert_store_nonconst_align_maximum_4(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_maximum_4(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 4
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 4
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 4
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 4
+ ret void
+}
+
+define void @insert_store_nonconst_align_larger(ptr %q, i64 %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_align_larger(
+; SPIRV-SAME: ptr [[Q:%.*]], i64 [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 2
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[I:%.*]] = load <8 x i64>, ptr [[Q]], align 4
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <8 x i64> [[I]], i64 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <8 x i64> [[VECINS]], ptr [[Q]], align 2
+; SPIRV-NEXT: ret void
+;
+ %cmp = icmp ult i32 %idx, 2
+ call void @llvm.assume(i1 %cmp)
+ %i = load <8 x i64>, ptr %q, align 4
+ %vecins = insertelement <8 x i64> %i, i64 %s, i32 %idx
+ store <8 x i64> %vecins, ptr %q, align 2
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case where the index is not a constant but is known valid via
+; an assume, for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+declare void @maythrow() readnone
+
+define void @insert_store_nonconst_index_not_known_valid_by_assume_after_load(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_assume_after_load(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 4
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: call void @maythrow()
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 4
+ %0 = load <16 x i8>, ptr %q
+ call void @maythrow()
+ call void @llvm.assume(i1 %cmp)
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 17
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 17
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case where the index is not a constant and may not be known
+; valid via the assume, for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_assume(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[CMP:%.*]] = icmp ult i32 [[IDX]], 17
+; SPIRV-NEXT: call void @llvm.assume(i1 [[CMP]])
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %cmp = icmp ult i32 %idx, 17
+ call void @llvm.assume(i1 %cmp)
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+declare void @llvm.assume(i1)
+
+define void @insert_store_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case where the index is not a constant but is known valid via
+; an 'and' mask, for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_base_frozen_and_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_base_frozen_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_FROZEN:%.*]] = freeze i32 [[IDX]]
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX_FROZEN]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.frozen = freeze i32 %idx
+ %idx.clamped = and i32 %idx.frozen, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_frozen_and_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_frozen_and_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[IDX_CLAMPED_FROZEN:%.*]] = freeze i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED_FROZEN]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %idx.clamped.frozen = freeze i32 %idx.clamped
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped.frozen
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 7
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 7
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify the case where the index is not a constant and may not be known
+; valid via the 'and' mask, for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_and(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = and i32 [[IDX]], 31
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = and i32 %idx, 31
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify that, for scalable vector types, a non-constant index is still
+; known valid when clamped by urem.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_base_frozen_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_base_frozen_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_FROZEN:%.*]] = freeze i32 [[IDX]]
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX_FROZEN]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.frozen = freeze i32 %idx
+ %idx.clamped = urem i32 %idx.frozen, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_frozen_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_frozen_and_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[IDX_CLAMPED_FROZEN:%.*]] = freeze i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED_FROZEN]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %idx.clamped.frozen = freeze i32 %idx.clamped
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped.frozen
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 16
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+; To verify that, for scalable vector types, a non-constant index may not be
+; valid even after a urem clamp.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; SPIRV-LABEL: define void @insert_store_vscale_nonconst_index_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <vscale x 16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <vscale x 16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; SPIRV-LABEL: define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]], i32 noundef [[IDX:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[IDX_CLAMPED:%.*]] = urem i32 [[IDX]], 17
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 [[IDX_CLAMPED]]
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %idx.clamped = urem i32 %idx, 17
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 %idx.clamped
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
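+
+; Why the masks above matter: the clamp only helps when it bounds the index
+; strictly below the element count. A hedged side-by-side, for <16 x i8>:
+;
+;   %a = and i32 %idx, 15    ; result in [0,15]  -> provably in bounds
+;   %u = urem i32 %idx, 16   ; result in [0,15]  -> provably in bounds
+;   %b = and i32 %idx, 16    ; result in {0,16}  -> 16 is out of bounds
+;   %v = urem i32 %idx, 17   ; result in [0,16]  -> 16 is out of bounds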
+
+define void @insert_store_ptr_strip(ptr %q, i8 zeroext %s) {
+; SPIRV-LABEL: define void @insert_store_ptr_strip(
+; SPIRV-SAME: ptr [[Q:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[VECINS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins = insertelement <16 x i8> %0, i8 %s, i32 3
+ store <16 x i8> %vecins, ptr %q
+ ret void
+}
+
+define void @volatile_update(ptr %q, ptr %p, i8 zeroext %s) {
+; SPIRV-LABEL: define void @volatile_update(
+; SPIRV-SAME: ptr [[Q:%.*]], ptr [[P:%.*]], i8 zeroext [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: [[VECINS0:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[S]], i32 3
+; SPIRV-NEXT: store volatile <16 x i8> [[VECINS0]], ptr [[Q]], align 16
+; SPIRV-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: [[VECINS1:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[S]], i32 1
+; SPIRV-NEXT: store <16 x i8> [[VECINS1]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %0 = load <16 x i8>, ptr %q
+ %vecins0 = insertelement <16 x i8> %0, i8 %s, i32 3
+ store volatile <16 x i8> %vecins0, ptr %q
+
+ %1 = load volatile <16 x i8>, ptr %p
+ %vecins1 = insertelement <16 x i8> %1, i8 %s, i32 1
+ store <16 x i8> %vecins1, ptr %p
+ ret void
+}
+
+define void @insert_store_addr_differ(ptr %p, ptr %q, i8 %s) {
+; SPIRV-LABEL: define void @insert_store_addr_differ(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i8 [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[Q]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %ld = load <16 x i8>, ptr %p
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %q
+ ret void
+}
+
+; We can't transform if any instruction in between could modify memory.
+define void @insert_store_mem_modify(ptr %p, ptr %q, ptr noalias %r, i8 %s, i32 %m) {
+; SPIRV-LABEL: define void @insert_store_mem_modify(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], ptr noalias [[R:%.*]], i8 [[S:%.*]], i32 [[M:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[Q]], align 16
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[P]], align 16
+; SPIRV-NEXT: [[LD2:%.*]] = load <16 x i8>, ptr [[Q]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[R]], align 16
+; SPIRV-NEXT: [[INS2:%.*]] = insertelement <16 x i8> [[LD2]], i8 [[S]], i32 7
+; SPIRV-NEXT: store <16 x i8> [[INS2]], ptr [[Q]], align 16
+; SPIRV-NEXT: [[LD3:%.*]] = load <4 x i32>, ptr [[P]], align 16
+; SPIRV-NEXT: store <16 x i8> zeroinitializer, ptr [[P]], align 16
+; SPIRV-NEXT: [[INS3:%.*]] = insertelement <4 x i32> [[LD3]], i32 [[M]], i32 0
+; SPIRV-NEXT: store <4 x i32> [[INS3]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ ; p may alias q
+ %ld = load <16 x i8>, ptr %p
+ store <16 x i8> zeroinitializer, ptr %q
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %p
+
+ ; p never aliases r
+ %ld2 = load <16 x i8>, ptr %q
+ store <16 x i8> zeroinitializer, ptr %r
+ %ins2 = insertelement <16 x i8> %ld2, i8 %s, i32 7
+ store <16 x i8> %ins2, ptr %q
+
+ ; p must alias ptr0
+ %ld3 = load <4 x i32>, ptr %p
+ store <16 x i8> zeroinitializer, ptr %p
+ %ins3 = insertelement <4 x i32> %ld3, i32 %m, i32 0
+ store <4 x i32> %ins3, ptr %p
+
+ ret void
+}
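+
+; Of the three pairs above, only the noalias one could ever be folded: the
+; intervening store to %r cannot touch %q, so on targets that perform the
+; fold the second group could, roughly, become (illustrative, not checked):
+;
+;   store <16 x i8> zeroinitializer, ptr %r
+;   %gep2 = getelementptr inbounds <16 x i8>, ptr %q, i32 0, i32 7
+;   store i8 %s, ptr %gep2, align 1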
+
+; Check cases where calls may modify memory.
+define void @insert_store_with_call(ptr %p, ptr %q, i8 %s) {
+; SPIRV-LABEL: define void @insert_store_with_call(
+; SPIRV-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i8 [[S:%.*]]) {
+; SPIRV-NEXT: [[ENTRY:.*:]]
+; SPIRV-NEXT: [[LD:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: call void @maywrite(ptr [[P]])
+; SPIRV-NEXT: [[INS:%.*]] = insertelement <16 x i8> [[LD]], i8 [[S]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[INS]], ptr [[P]], align 16
+; SPIRV-NEXT: call void @foo()
+; SPIRV-NEXT: [[LD2:%.*]] = load <16 x i8>, ptr [[P]], align 16
+; SPIRV-NEXT: call void @nowrite(ptr [[P]])
+; SPIRV-NEXT: [[INS2:%.*]] = insertelement <16 x i8> [[LD2]], i8 [[S]], i32 7
+; SPIRV-NEXT: store <16 x i8> [[INS2]], ptr [[P]], align 16
+; SPIRV-NEXT: ret void
+;
+entry:
+ %ld = load <16 x i8>, ptr %p
+ call void @maywrite(ptr %p)
+ %ins = insertelement <16 x i8> %ld, i8 %s, i32 3
+ store <16 x i8> %ins, ptr %p
+ call void @foo() ; Barrier
+ %ld2 = load <16 x i8>, ptr %p
+ call void @nowrite(ptr %p)
+ %ins2 = insertelement <16 x i8> %ld2, i8 %s, i32 7
+ store <16 x i8> %ins2, ptr %p
+ ret void
+}
+
+declare void @foo()
+declare void @maywrite(ptr)
+declare void @nowrite(ptr) readonly
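+
+; The readonly on @nowrite is what keeps it from being treated as a clobber,
+; while the attribute-free @maywrite and @foo act as barriers. In newer IR the
+; same guarantee is usually written with the memory attribute; an assumed
+; equivalent spelling (not used by this test):
+;
+;   declare void @nowrite(ptr) memory(read)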
+
+; To verify that the combine gives up when the number of instructions
+; in between exceeds the scan limit (default 30).
+define i32 @insert_store_maximum_scan_instrs(i32 %arg, ptr %arg1, ptr %arg2, i8 zeroext %arg3) {
+; SPIRV-LABEL: define i32 @insert_store_maximum_scan_instrs(
+; SPIRV-SAME: i32 [[ARG:%.*]], ptr [[ARG1:%.*]], ptr [[ARG2:%.*]], i8 zeroext [[ARG3:%.*]]) {
+; SPIRV-NEXT: [[BB:.*:]]
+; SPIRV-NEXT: [[I:%.*]] = or i32 [[ARG]], 1
+; SPIRV-NEXT: [[I4:%.*]] = load <16 x i8>, ptr [[ARG2]], align 16
+; SPIRV-NEXT: [[I5:%.*]] = tail call i32 @bar(i32 [[I]], i1 true)
+; SPIRV-NEXT: [[I6:%.*]] = shl i32 [[ARG]], [[I5]]
+; SPIRV-NEXT: [[I7:%.*]] = lshr i32 [[I6]], 26
+; SPIRV-NEXT: [[I8:%.*]] = trunc i32 [[I7]] to i8
+; SPIRV-NEXT: [[I9:%.*]] = and i8 [[I8]], 31
+; SPIRV-NEXT: [[I10:%.*]] = lshr i32 [[I6]], 11
+; SPIRV-NEXT: [[I11:%.*]] = and i32 [[I10]], 32767
+; SPIRV-NEXT: [[I12:%.*]] = zext i8 [[I9]] to i64
+; SPIRV-NEXT: [[I13:%.*]] = getelementptr inbounds i16, ptr [[ARG1]], i64 [[I12]]
+; SPIRV-NEXT: [[I14:%.*]] = load i16, ptr [[I13]], align 2
+; SPIRV-NEXT: [[I15:%.*]] = zext i16 [[I14]] to i32
+; SPIRV-NEXT: [[I16:%.*]] = add nuw nsw i8 [[I9]], 1
+; SPIRV-NEXT: [[I17:%.*]] = zext i8 [[I16]] to i64
+; SPIRV-NEXT: [[I18:%.*]] = getelementptr inbounds i16, ptr [[ARG1]], i64 [[I17]]
+; SPIRV-NEXT: [[I19:%.*]] = load i16, ptr [[I18]], align 2
+; SPIRV-NEXT: [[I20:%.*]] = zext i16 [[I19]] to i32
+; SPIRV-NEXT: [[I21:%.*]] = sub nsw i32 [[I20]], [[I15]]
+; SPIRV-NEXT: [[I22:%.*]] = mul nsw i32 [[I11]], [[I21]]
+; SPIRV-NEXT: [[I23:%.*]] = ashr i32 [[I22]], 15
+; SPIRV-NEXT: [[I24:%.*]] = shl nuw nsw i32 [[I5]], 15
+; SPIRV-NEXT: [[I25:%.*]] = xor i32 [[I24]], 1015808
+; SPIRV-NEXT: [[I26:%.*]] = add nuw nsw i32 [[I25]], [[I15]]
+; SPIRV-NEXT: [[I27:%.*]] = add nsw i32 [[I26]], [[I23]]
+; SPIRV-NEXT: [[I28:%.*]] = sitofp i32 [[ARG]] to double
+; SPIRV-NEXT: [[I29:%.*]] = tail call double @llvm.log2.f64(double [[I28]])
+; SPIRV-NEXT: [[I30:%.*]] = fptosi double [[I29]] to i32
+; SPIRV-NEXT: [[I31:%.*]] = shl nsw i32 [[I30]], 15
+; SPIRV-NEXT: [[I32:%.*]] = or i32 [[I31]], 4
+; SPIRV-NEXT: [[I33:%.*]] = icmp eq i32 [[I27]], [[I32]]
+; SPIRV-NEXT: [[I34:%.*]] = select i1 [[I33]], i32 [[ARG]], i32 [[I31]]
+; SPIRV-NEXT: [[I35:%.*]] = lshr i32 [[I34]], 1
+; SPIRV-NEXT: [[I36:%.*]] = insertelement <16 x i8> [[I4]], i8 [[ARG3]], i32 3
+; SPIRV-NEXT: store <16 x i8> [[I36]], ptr [[ARG2]], align 16
+; SPIRV-NEXT: ret i32 [[I35]]
+;
+bb:
+ %i = or i32 %arg, 1
+ %i4 = load <16 x i8>, ptr %arg2, align 16
+ %i5 = tail call i32 @bar(i32 %i, i1 true)
+ %i6 = shl i32 %arg, %i5
+ %i7 = lshr i32 %i6, 26
+ %i8 = trunc i32 %i7 to i8
+ %i9 = and i8 %i8, 31
+ %i10 = lshr i32 %i6, 11
+ %i11 = and i32 %i10, 32767
+ %i12 = zext i8 %i9 to i64
+ %i13 = getelementptr inbounds i16, ptr %arg1, i64 %i12
+ %i14 = load i16, ptr %i13, align 2
+ %i15 = zext i16 %i14 to i32
+ %i16 = add nuw nsw i8 %i9, 1
+ %i17 = zext i8 %i16 to i64
+ %i18 = getelementptr inbounds i16, ptr %arg1, i64 %i17
+ %i19 = load i16, ptr %i18, align 2
+ %i20 = zext i16 %i19 to i32
+ %i21 = sub nsw i32 %i20, %i15
+ %i22 = mul nsw i32 %i11, %i21
+ %i23 = ashr i32 %i22, 15
+ %i24 = shl nuw nsw i32 %i5, 15
+ %i25 = xor i32 %i24, 1015808
+ %i26 = add nuw nsw i32 %i25, %i15
+ %i27 = add nsw i32 %i26, %i23
+ %i28 = sitofp i32 %arg to double
+ %i29 = tail call double @llvm.log2.f64(double %i28)
+ %i30 = fptosi double %i29 to i32
+ %i31 = shl nsw i32 %i30, 15
+ %i32 = or i32 %i31, 4
+ %i33 = icmp eq i32 %i27, %i32
+ %i34 = select i1 %i33, i32 %arg, i32 %i31
+ %i35 = lshr i32 %i34, 1
+ %i36 = insertelement <16 x i8> %i4, i8 %arg3, i32 3
+ store <16 x i8> %i36, ptr %arg2, align 16
+ ret i32 %i35
+}
+
+declare i32 @bar(i32, i1) readonly
+declare double @llvm.log2.f64(double)
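+
+; The limit of 30 instructions referenced above is VectorCombine's scan
+; budget. If a local experiment needs a different budget, the pass is assumed
+; to expose it as a cl::opt roughly like the following (flag name from
+; VectorCombine.cpp; verify against your tree before relying on it):
+;
+;   ; RUN: opt -passes=vector-combine -vector-combine-max-scan-instrs=50 -S %s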
+
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
index 143cc38..915e387 100644
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -278,6 +278,7 @@ tools.extend(
]
)
+
# Find (major, minor) version of ptxas
def ptxas_version(ptxas):
ptxas_cmd = subprocess.Popen([ptxas, "--version"], stdout=subprocess.PIPE)
@@ -451,7 +452,7 @@ if config.link_llvm_dylib:
"%llvmdylib",
"{}/libLLVM{}.{}".format(
config.llvm_shlib_dir, config.llvm_shlib_ext, config.llvm_dylib_version
- )
+ ),
)
)
@@ -582,6 +583,7 @@ def have_ld64_plugin_support():
if have_ld64_plugin_support():
config.available_features.add("ld64_plugin")
+
def host_unwind_supports_jit():
# Do we expect the host machine to support JIT registration of clang's
# default unwind info format for the host (e.g. eh-frames, compact-unwind,
@@ -589,7 +591,7 @@ def host_unwind_supports_jit():
# Linux and the BSDs use DWARF eh-frames and all known unwinders support
# register_frame at minimum.
- if platform.system() in [ "Linux", "FreeBSD", "NetBSD" ]:
+ if platform.system() in ["Linux", "FreeBSD", "NetBSD"]:
return True
# Windows does not support frame info without the ORC runtime.
@@ -601,11 +603,7 @@ def host_unwind_supports_jit():
# compact-unwind only, and JIT'd registration is not available before
# macOS 14.0.
if platform.system() == "Darwin":
-
- assert (
- "arm64" in config.host_triple
- or "x86_64" in config.host_triple
- )
+ assert "arm64" in config.host_triple or "x86_64" in config.host_triple
if "x86_64" in config.host_triple:
return True
@@ -627,6 +625,7 @@ def host_unwind_supports_jit():
return False
+
if host_unwind_supports_jit():
config.available_features.add("host-unwind-supports-jit")
diff --git a/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test b/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
index f2fe794..db223cd 100644
--- a/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
+++ b/llvm/test/tools/dsymutil/ARM/stmt-seq-macho.test
@@ -5,7 +5,13 @@
# RUN: yaml2obj %t/stmt_seq_macho.o.yaml -o %t/stmt_seq_macho.o
# RUN: dsymutil --flat --verify-dwarf=none -oso-prepend-path %t %t/stmt_seq_macho.exe -o %t/stmt_seq_macho.dSYM
# RUN: llvm-dwarfdump --debug-info --debug-line -v %t/stmt_seq_macho.dSYM | sort | FileCheck %s -check-prefix=CHECK_DSYM
+# RUN: llvm-dwarfdump --debug-info --debug-line -v %t/stmt_seq_macho.dSYM > %t/stmt_seq_macho.dSYM.txt
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | sort | FileCheck %s -check-prefix=CHECK_DSYM
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | FileCheck %s -check-prefix=CHECK_NO_INVALID_OFFSET
+# RUN: cat %t/stmt_seq_macho.dSYM.txt | grep DW_AT_LLVM_stmt_sequence | sort | uniq -d | wc -l | FileCheck %s -check-prefix=CHECK_NO_DUPLICATES
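+# The pipeline above collects every DW_AT_LLVM_stmt_sequence attribute, sorts
+# it, and counts duplicated lines via `uniq -d | wc -l`; CHECK_NO_DUPLICATES
+# matching 0 means each sequence offset in the dSYM is unique.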
+# CHECK_NO_DUPLICATES: 0
+# CHECK_NO_INVALID_OFFSET-NOT: DW_AT_LLVM_stmt_sequence{{.*}}0xfffffff
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET1:(0x[0-9a-f]+)]])
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET2:(0x[0-9a-f]+)]])
# CHECK_DSYM: DW_AT_LLVM_stmt_sequence [DW_FORM_sec_offset] ([[OFFSET3:(0x[0-9a-f]+)]])
@@ -18,6 +24,9 @@
#--- stmt_seq_macho.cpp
#define ATTRIB extern "C" __attribute__((noinline))
+ATTRIB int function1_copy1(int a) {
+ return ++a;
+}
ATTRIB int function3_copy1(int a) {
int b = a + 3;
@@ -51,6 +60,7 @@ int main() {
sum += function2_copy2(3);
sum += function3_copy2(41);
sum += function2_copy1(11);
+ sum += function1_copy1(42);
length_error e("test");
return sum;
}
@@ -108,9 +118,9 @@ LoadCommands:
cmdsize: 1032
segname: ''
vmaddr: 0
- vmsize: 2793
+ vmsize: 3125
fileoff: 1208
- filesize: 2793
+ filesize: 3125
maxprot: 7
initprot: 7
nsects: 12
@@ -119,18 +129,18 @@ LoadCommands:
- sectname: __text
segname: __TEXT
addr: 0x0
- size: 128
+ size: 148
offset: 0x4B8
align: 2
- reloff: 0xFA8
- nreloc: 7
+ reloff: 0x10F0
+ nreloc: 8
flags: 0x80000400
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00100011C0035FD600580051C0035FD600100011C0035FD600580051C0035FD6FFC300D1F44F01A9FD7B02A9FD8300916000805200000094F30300AA20058052000000941400130B6001805200000094F30300AA0100009021000091E03F0091000000948002130BFD7B42A9F44F41A9FFC30091C0035FD600000014C0035FD6
+ content: 00040011C0035FD600100011C0035FD600580051C0035FD600100011C0035FD600580051C0035FD6FFC300D1F44F01A9FD7B02A9FD8300916000805200000094F30300AA20058052000000941400130B6001805200000094F30300AA40058052000000947302000B0100009021000091E03F0091000000948002130BFD7B42A9F44F41A9FFC30091C0035FD600000014C0035FD6
relocations:
- - address: 0x78
+ - address: 0x8C
symbolnum: 4
pcrel: true
length: 2
@@ -138,7 +148,7 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x60
+ - address: 0x74
symbolnum: 3
pcrel: true
length: 2
@@ -146,7 +156,7 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x58
+ - address: 0x6C
symbolnum: 1
pcrel: false
length: 2
@@ -154,7 +164,7 @@ LoadCommands:
type: 4
scattered: false
value: 0
- - address: 0x54
+ - address: 0x68
symbolnum: 1
pcrel: true
length: 2
@@ -162,7 +172,7 @@ LoadCommands:
type: 3
scattered: false
value: 0
- - address: 0x4C
+ - address: 0x60
symbolnum: 5
pcrel: true
length: 2
@@ -170,16 +180,24 @@ LoadCommands:
type: 2
scattered: false
value: 0
- - address: 0x40
- symbolnum: 8
+ - address: 0x54
+ symbolnum: 6
pcrel: true
length: 2
extern: true
type: 2
scattered: false
value: 0
- - address: 0x34
- symbolnum: 6
+ - address: 0x48
+ symbolnum: 9
+ pcrel: true
+ length: 2
+ extern: true
+ type: 2
+ scattered: false
+ value: 0
+ - address: 0x3C
+ symbolnum: 7
pcrel: true
length: 2
extern: true
@@ -188,9 +206,9 @@ LoadCommands:
value: 0
- sectname: __cstring
segname: __TEXT
- addr: 0x80
+ addr: 0x94
size: 5
- offset: 0x538
+ offset: 0x54C
align: 0
reloff: 0x0
nreloc: 0
@@ -201,9 +219,9 @@ LoadCommands:
content: '7465737400'
- sectname: __debug_loc
segname: __DWARF
- addr: 0x85
+ addr: 0x99
size: 412
- offset: 0x53D
+ offset: 0x551
align: 0
reloff: 0x0
nreloc: 0
@@ -211,12 +229,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00000000000000000400000000000000010050040000000000000008000000000000000400A301509F0000000000000000000000000000000000000000000000000400000000000000030070039F0000000000000000000000000000000008000000000000000C000000000000000100500C0000000000000010000000000000000400A301509F0000000000000000000000000000000010000000000000001400000000000000010050140000000000000018000000000000000400A301509F0000000000000000000000000000000010000000000000001400000000000000030070039F0000000000000000000000000000000018000000000000001C000000000000000100501C0000000000000020000000000000000400A301509F000000000000000000000000000000001C0000000000000020000000000000000100500000000000000000000000000000000030000000000000003C00000000000000030011009F3C0000000000000048000000000000000100634800000000000000540000000000000001006400000000000000000000000000000000
+ content: 08000000000000000C000000000000000100500C0000000000000010000000000000000400A301509F0000000000000000000000000000000008000000000000000C00000000000000030070039F0000000000000000000000000000000010000000000000001400000000000000010050140000000000000018000000000000000400A301509F0000000000000000000000000000000018000000000000001C000000000000000100501C0000000000000020000000000000000400A301509F0000000000000000000000000000000018000000000000001C00000000000000030070039F0000000000000000000000000000000020000000000000002400000000000000010050240000000000000028000000000000000400A301509F00000000000000000000000000000000240000000000000028000000000000000100500000000000000000000000000000000038000000000000004400000000000000030011009F4400000000000000500000000000000001006350000000000000005C0000000000000001006400000000000000000000000000000000
- sectname: __debug_abbrev
segname: __DWARF
- addr: 0x221
- size: 359
- offset: 0x6D9
+ addr: 0x235
+ size: 372
+ offset: 0x6ED
align: 0
reloff: 0x0
nreloc: 0
@@ -226,18 +244,34 @@ LoadCommands:
reserved3: 0x0
- sectname: __debug_info
segname: __DWARF
- addr: 0x388
- size: 686
- offset: 0x840
+ addr: 0x3A9
+ size: 747
+ offset: 0x861
align: 0
- reloff: 0xFE0
- nreloc: 14
+ reloff: 0x1130
+ nreloc: 16
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
relocations:
- - address: 0x26A
+ - address: 0x2A7
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0x28E
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0x253
symbolnum: 1
pcrel: false
length: 3
@@ -245,7 +279,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x251
+ - address: 0x1F5
symbolnum: 1
pcrel: false
length: 3
@@ -253,7 +287,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x216
+ - address: 0x1E1
symbolnum: 1
pcrel: false
length: 3
@@ -261,7 +295,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x1B8
+ - address: 0x1CE
symbolnum: 1
pcrel: false
length: 3
@@ -269,7 +303,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x1A5
+ - address: 0x1BA
symbolnum: 1
pcrel: false
length: 3
@@ -277,7 +311,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x191
+ - address: 0x1A7
symbolnum: 1
pcrel: false
length: 3
@@ -285,7 +319,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x17E
+ - address: 0x169
symbolnum: 1
pcrel: false
length: 3
@@ -293,7 +327,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x140
+ - address: 0x12D
symbolnum: 1
pcrel: false
length: 3
@@ -301,7 +335,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x104
+ - address: 0xF1
symbolnum: 1
pcrel: false
length: 3
@@ -309,7 +343,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xC8
+ - address: 0xC4
symbolnum: 1
pcrel: false
length: 3
@@ -317,7 +351,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x9B
+ - address: 0x88
symbolnum: 1
pcrel: false
length: 3
@@ -351,9 +385,9 @@ LoadCommands:
value: 0
- sectname: __debug_str
segname: __DWARF
- addr: 0x636
- size: 239
- offset: 0xAEE
+ addr: 0x694
+ size: 400
+ offset: 0xB4C
align: 0
reloff: 0x0
nreloc: 0
@@ -363,9 +397,9 @@ LoadCommands:
reserved3: 0x0
- sectname: __apple_names
segname: __DWARF
- addr: 0x725
- size: 260
- offset: 0xBDD
+ addr: 0x824
+ size: 288
+ offset: 0xCDC
align: 0
reloff: 0x0
nreloc: 0
@@ -373,12 +407,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 485341480100000008000000080000000C000000000000000100000001000600000000000200000005000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF90D9F86F88CB36CF4908311CD1125E5389CB36CF4A08311C522B70536A7F9A7C8000000094000000A4000000B4000000C4000000D4000000E4000000F40000008A0000000200000015020000690200000000000055000000010000009A0000000000000045000000010000005E00000000000000A3000000010000001502000000000000750000000100000003010000000000006500000001000000C700000000000000BB00000001000000690200000000000085000000010000003F01000000000000
+ content: 485341480100000009000000090000000C00000000000000010000000100060000000000FFFFFFFFFFFFFFFF0100000003000000040000000600000007000000080000004A08311CC78E3C8288CB36CF89CB36CFD1125E53522B705390D9F86F6A7F9A7C4908311C8C0000009C000000AC000000BC000000CC000000DC000000EC00000000010000100100000601000001000000F000000000000000D6000000010000005E00000000000000F600000001000000C30000000000000016010000010000002C01000000000000440100000100000052020000000000005C01000001000000A6020000000000002B0100000200000052020000A60200000000000026010000010000006801000000000000E6000000010000008700000000000000
- sectname: __apple_objc
segname: __DWARF
- addr: 0x829
+ addr: 0x944
size: 36
- offset: 0xCE1
+ offset: 0xDFC
align: 0
reloff: 0x0
nreloc: 0
@@ -389,9 +423,9 @@ LoadCommands:
content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF
- sectname: __apple_namespac
segname: __DWARF
- addr: 0x84D
+ addr: 0x968
size: 36
- offset: 0xD05
+ offset: 0xE20
align: 0
reloff: 0x0
nreloc: 0
@@ -402,9 +436,9 @@ LoadCommands:
content: 485341480100000001000000000000000C000000000000000100000001000600FFFFFFFF
- sectname: __apple_types
segname: __DWARF
- addr: 0x871
+ addr: 0x98C
size: 195
- offset: 0xD29
+ offset: 0xE44
align: 0
reloff: 0x0
nreloc: 0
@@ -412,21 +446,29 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 48534148010000000500000005000000140000000000000003000000010006000300050004000B000000000002000000FFFFFFFF03000000040000007CA8F05D90D9F86F5B738CDC3080880B6320957C64000000770000008A0000009D000000B00000009700000001000000EA010000130000000000008A00000001000000C80100001300000000000031000000010000005700000024000000000000D300000001000000A1020000240000000000002C000000010000005000000024000000000000
+ content: 48534148010000000500000005000000140000000000000003000000010006000300050004000B000000000002000000FFFFFFFF03000000040000007CA8F05D90D9F86F5B738CDC3080880B6320957C64000000770000008A0000009D000000B0000000380100000100000027020000130000000000002B010000010000000502000013000000000000C20000000100000057000000240000000000007401000001000000DE02000024000000000000BD000000010000005000000024000000000000
- sectname: __debug_frame
segname: __DWARF
- addr: 0x938
- size: 208
- offset: 0xDF0
+ addr: 0xA50
+ size: 232
+ offset: 0xF08
align: 3
- reloff: 0x1050
- nreloc: 7
+ reloff: 0x11B0
+ nreloc: 8
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 14000000FFFFFFFF0400080001781E0C1F00000000000000140000000000000000000000000000000800000000000000140000000000000008000000000000000800000000000000140000000000000010000000000000000800000000000000140000000000000018000000000000000800000000000000240000000000000020000000000000005800000000000000500C1D109E019D02930394040000000014000000000000007800000000000000040000000000000014000000000000007C000000000000000400000000000000
+ content: 14000000FFFFFFFF0400080001781E0C1F00000000000000140000000000000000000000000000000800000000000000140000000000000008000000000000000800000000000000140000000000000010000000000000000800000000000000140000000000000018000000000000000800000000000000140000000000000020000000000000000800000000000000240000000000000028000000000000006400000000000000500C1D109E019D02930394040000000014000000000000008C000000000000000400000000000000140000000000000090000000000000000400000000000000
relocations:
+ - address: 0xD8
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
- address: 0xC0
symbolnum: 1
pcrel: false
@@ -435,7 +477,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xA8
+ - address: 0x98
symbolnum: 1
pcrel: false
length: 3
@@ -485,18 +527,26 @@ LoadCommands:
value: 0
- sectname: __debug_line
segname: __DWARF
- addr: 0xA08
- size: 225
- offset: 0xEC0
+ addr: 0xB38
+ size: 253
+ offset: 0xFF0
align: 0
- reloff: 0x1088
- nreloc: 7
+ reloff: 0x11F0
+ nreloc: 8
flags: 0x2000000
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
relocations:
- - address: 0xD1
+ - address: 0xED
+ symbolnum: 1
+ pcrel: false
+ length: 3
+ extern: false
+ type: 0
+ scattered: false
+ value: 0
+ - address: 0xD9
symbolnum: 1
pcrel: false
length: 3
@@ -504,7 +554,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0xBD
+ - address: 0xAA
symbolnum: 1
pcrel: false
length: 3
@@ -512,7 +562,7 @@ LoadCommands:
type: 0
scattered: false
value: 0
- - address: 0x92
+ - address: 0x96
symbolnum: 1
pcrel: false
length: 3
@@ -560,21 +610,21 @@ LoadCommands:
ntools: 0
- cmd: LC_LINKER_OPTIMIZATION_HINT
cmdsize: 16
- dataoff: 4288
+ dataoff: 4656
datasize: 8
- cmd: LC_SYMTAB
cmdsize: 24
- symoff: 4296
- nsyms: 10
- stroff: 4456
- strsize: 144
+ symoff: 4664
+ nsyms: 11
+ stroff: 4840
+ strsize: 168
- cmd: LC_DYSYMTAB
cmdsize: 80
ilocalsym: 0
nlocalsym: 3
iextdefsym: 3
- nextdefsym: 7
- iundefsym: 10
+ nextdefsym: 8
+ iundefsym: 11
nundefsym: 0
tocoff: 0
ntoc: 0
@@ -590,7 +640,7 @@ LoadCommands:
nlocrel: 0
LinkEditData:
NameList:
- - n_strx: 138
+ - n_strx: 155
n_type: 0xE
n_sect: 1
n_desc: 0
@@ -599,47 +649,52 @@ LinkEditData:
n_type: 0xE
n_sect: 2
n_desc: 0
- n_value: 128
- - n_strx: 132
+ n_value: 148
+ - n_strx: 149
n_type: 0xE
n_sect: 2
n_desc: 0
- n_value: 128
+ n_value: 148
- n_strx: 39
n_type: 0xF
n_sect: 1
n_desc: 192
- n_value: 120
+ n_value: 140
- n_strx: 14
n_type: 0xF
n_sect: 1
n_desc: 192
- n_value: 124
+ n_value: 144
+ - n_strx: 132
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 0
- n_strx: 115
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 8
+ n_value: 16
- n_strx: 81
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 24
+ n_value: 32
- n_strx: 98
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 0
+ n_value: 8
- n_strx: 64
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 16
+ n_value: 24
- n_strx: 8
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 32
+ n_value: 40
StringTable:
- ''
- l_.str
@@ -650,16 +705,25 @@ LinkEditData:
- _function2_copy2
- _function3_copy1
- _function2_copy1
+ - _function1_copy1
- ltmp1
- ltmp0
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
+ - ''
DWARF:
debug_str:
- - ''
+ - 'Facebook clang version 19.1.5 (https://git.internal.tfbnw.net/repos/git/rw/osmeta/external/llvm-project b36c9ae1f8f2b39e4aafb9ca4700c608c3036365)'
- stmt_seq_macho.cpp
- '/'
- '/private/tmp/stmt_seq'
- char
- __ARRAY_SIZE_TYPE__
+ - function1_copy1
- function3_copy1
- function2_copy1
- function3_copy2
@@ -786,6 +850,18 @@ DWARF:
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
+ - Attribute: DW_AT_name
+ Form: DW_FORM_strp
+ - Attribute: DW_AT_decl_file
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_decl_line
+ Form: DW_FORM_data1
+ - Attribute: DW_AT_type
+ Form: DW_FORM_ref4
+ - Code: 0xA
+ Tag: DW_TAG_formal_parameter
+ Children: DW_CHILDREN_no
+ Attributes:
- Attribute: DW_AT_location
Form: DW_FORM_sec_offset
- Attribute: DW_AT_name
@@ -796,7 +872,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xA
+ - Code: 0xB
Tag: DW_TAG_variable
Children: DW_CHILDREN_no
Attributes:
@@ -810,7 +886,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xB
+ - Code: 0xC
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -836,7 +912,7 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_APPLE_optimized
Form: DW_FORM_flag_present
- - Code: 0xC
+ - Code: 0xD
Tag: DW_TAG_variable
Children: DW_CHILDREN_no
Attributes:
@@ -850,7 +926,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0xD
+ - Code: 0xE
Tag: DW_TAG_call_site
Children: DW_CHILDREN_yes
Attributes:
@@ -858,7 +934,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_call_return_pc
Form: DW_FORM_addr
- - Code: 0xE
+ - Code: 0xF
Tag: DW_TAG_call_site_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -866,7 +942,7 @@ DWARF:
Form: DW_FORM_exprloc
- Attribute: DW_AT_call_value
Form: DW_FORM_exprloc
- - Code: 0xF
+ - Code: 0x10
Tag: DW_TAG_structure_type
Children: DW_CHILDREN_yes
Attributes:
@@ -880,7 +956,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_decl_line
Form: DW_FORM_data1
- - Code: 0x10
+ - Code: 0x11
Tag: DW_TAG_inheritance
Children: DW_CHILDREN_no
Attributes:
@@ -888,7 +964,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_data_member_location
Form: DW_FORM_data1
- - Code: 0x11
+ - Code: 0x12
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -906,7 +982,7 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_explicit
Form: DW_FORM_flag_present
- - Code: 0x12
+ - Code: 0x13
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -914,13 +990,13 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_artificial
Form: DW_FORM_flag_present
- - Code: 0x13
+ - Code: 0x14
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x14
+ - Code: 0x15
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -936,13 +1012,13 @@ DWARF:
Form: DW_FORM_flag_present
- Attribute: DW_AT_APPLE_optimized
Form: DW_FORM_flag_present
- - Code: 0x15
+ - Code: 0x16
Tag: DW_TAG_pointer_type
Children: DW_CHILDREN_no
Attributes:
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x16
+ - Code: 0x17
Tag: DW_TAG_subprogram
Children: DW_CHILDREN_yes
Attributes:
@@ -964,7 +1040,7 @@ DWARF:
Form: DW_FORM_strp
- Attribute: DW_AT_specification
Form: DW_FORM_ref4
- - Code: 0x17
+ - Code: 0x18
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -976,7 +1052,7 @@ DWARF:
Form: DW_FORM_ref4
- Attribute: DW_AT_artificial
Form: DW_FORM_flag_present
- - Code: 0x18
+ - Code: 0x19
Tag: DW_TAG_formal_parameter
Children: DW_CHILDREN_no
Attributes:
@@ -990,7 +1066,7 @@ DWARF:
Form: DW_FORM_data1
- Attribute: DW_AT_type
Form: DW_FORM_ref4
- - Code: 0x19
+ - Code: 0x1A
Tag: DW_TAG_call_site
Children: DW_CHILDREN_yes
Attributes:
@@ -1001,7 +1077,7 @@ DWARF:
- Attribute: DW_AT_call_pc
Form: DW_FORM_addr
debug_info:
- - Length: 0x2AA
+ - Length: 0x2E7
Version: 4
AbbrevTableID: 0
AbbrOffset: 0x0
@@ -1011,20 +1087,20 @@ DWARF:
Values:
- Value: 0x0
- Value: 0x21
- - Value: 0x1
- - Value: 0x14
+ - Value: 0x92
+ - Value: 0xA5
- Value: 0x0
- - Value: 0x16
+ - Value: 0xA7
- Value: 0x1
- Value: 0x0
- - Value: 0x80
+ - Value: 0x94
- AbbrCode: 0x2
Values:
- Value: 0x3F
- Value: 0x1
- - Value: 0x23
+ - Value: 0x27
- Value: 0x9
- BlockData: [ 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ BlockData: [ 0x3, 0x94, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0 ]
- AbbrCode: 0x3
Values:
@@ -1039,12 +1115,12 @@ DWARF:
- Value: 0x50
- AbbrCode: 0x6
Values:
- - Value: 0x2C
+ - Value: 0xBD
- Value: 0x6
- Value: 0x1
- AbbrCode: 0x7
Values:
- - Value: 0x31
+ - Value: 0xC2
- Value: 0x8
- Value: 0x7
- AbbrCode: 0x8
@@ -1056,285 +1132,318 @@ DWARF:
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x45
+ - Value: 0xD6
- Value: 0x1
- - Value: 0x3
- - Value: 0x2A1
+ - Value: 0x2
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- AbbrCode: 0x9
Values:
- - Value: 0x0
- - Value: 0xD7
+ - Value: 0x178
+ - Value: 0x1
+ - Value: 0x2
+ - Value: 0x2DE
+ - AbbrCode: 0x0
+ - AbbrCode: 0x8
+ Values:
+ - Value: 0x8
+ - Value: 0x8
+ - Value: 0x1
+ - Value: 0x4A
+ - Value: 0x1
+ BlockData: [ 0x6F ]
+ - Value: 0x1
+ - Value: 0xE6
+ - Value: 0x1
+ - Value: 0x6
+ - Value: 0x2DE
+ - Value: 0x1
- Value: 0x1
- - Value: 0x3
- - Value: 0x2A1
- AbbrCode: 0xA
Values:
+ - Value: 0x0
+ - Value: 0x178
+ - Value: 0x1
+ - Value: 0x6
+ - Value: 0x2DE
+ - AbbrCode: 0xB
+ Values:
- Value: 0x39
- - Value: 0xD9
+ - Value: 0x17A
- Value: 0x1
- - Value: 0x4
- - Value: 0x2A1
+ - Value: 0x7
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x8
+ - Value: 0x10
- Value: 0x8
- Value: 0x1
- - Value: 0x4A
+ - Value: 0x60
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x55
+ - Value: 0xF6
- Value: 0x1
- - Value: 0x8
- - Value: 0x2A1
+ - Value: 0xB
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0x5E
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0x8
- - Value: 0x2A1
+ - Value: 0xB
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x10
+ - Value: 0x18
- Value: 0x8
- Value: 0x1
- - Value: 0x60
+ - Value: 0x78
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x65
+ - Value: 0x106
- Value: 0x1
- - Value: 0xC
- - Value: 0x2A1
+ - Value: 0xF
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0x97
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0xC
- - Value: 0x2A1
- - AbbrCode: 0xA
+ - Value: 0xF
+ - Value: 0x2DE
+ - AbbrCode: 0xB
Values:
- Value: 0xD0
- - Value: 0xD9
+ - Value: 0x17A
- Value: 0x1
- - Value: 0xD
- - Value: 0x2A1
+ - Value: 0x10
+ - Value: 0x2DE
- AbbrCode: 0x0
- AbbrCode: 0x8
Values:
- - Value: 0x18
+ - Value: 0x20
- Value: 0x8
- Value: 0x1
- - Value: 0x78
+ - Value: 0x90
- Value: 0x1
BlockData: [ 0x6F ]
- Value: 0x1
- - Value: 0x75
+ - Value: 0x116
- Value: 0x1
- - Value: 0x11
- - Value: 0x2A1
+ - Value: 0x14
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x9
+ - AbbrCode: 0xA
Values:
- Value: 0xF5
- - Value: 0xD7
+ - Value: 0x178
- Value: 0x1
- - Value: 0x11
- - Value: 0x2A1
- - AbbrCode: 0xA
+ - Value: 0x14
+ - Value: 0x2DE
+ - AbbrCode: 0xB
Values:
- Value: 0x12E
- - Value: 0xDB
+ - Value: 0x17C
- Value: 0x1
- - Value: 0x12
- - Value: 0x2A1
+ - Value: 0x15
+ - Value: 0x2DE
- AbbrCode: 0x0
- - AbbrCode: 0xB
+ - AbbrCode: 0xC
Values:
- - Value: 0x20
- - Value: 0x58
- - Value: 0x8F
+ - Value: 0x28
+ - Value: 0x64
+ - Value: 0xA7
- Value: 0x1
BlockData: [ 0x6D ]
- Value: 0x1
- - Value: 0x85
+ - Value: 0x126
- Value: 0x1
- - Value: 0x1E
- - Value: 0x2A1
+ - Value: 0x21
+ - Value: 0x2DE
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0xC
+ - AbbrCode: 0xD
Values:
- Value: 0x2
BlockData: [ 0x8F, 0xF ]
- - Value: 0xE2
+ - Value: 0x183
- Value: 0x1
- - Value: 0x23
- - Value: 0x1C8
- - AbbrCode: 0xA
+ - Value: 0x27
+ - Value: 0x205
+ - AbbrCode: 0xB
Values:
- Value: 0x151
- - Value: 0xE4
+ - Value: 0x185
- Value: 0x1
- - Value: 0x1F
- - Value: 0x2A1
- - AbbrCode: 0xD
- Values:
- - Value: 0x103
- - Value: 0x38
+ - Value: 0x22
+ - Value: 0x2DE
- AbbrCode: 0xE
Values:
+ - Value: 0x12C
+ - Value: 0x40
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x1
BlockData: [ 0x33 ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
- Values:
- - Value: 0xC7
- - Value: 0x44
- AbbrCode: 0xE
Values:
+ - Value: 0xF0
+ - Value: 0x4C
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x2
BlockData: [ 0x10, 0x29 ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
- Values:
- - Value: 0x9A
- - Value: 0x50
- AbbrCode: 0xE
Values:
+ - Value: 0xC3
+ - Value: 0x58
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x1
BlockData: [ 0x3B ]
- AbbrCode: 0x0
- - AbbrCode: 0xD
+ - AbbrCode: 0xE
Values:
- - Value: 0x215
+ - Value: 0x5E
- Value: 0x64
+ - AbbrCode: 0xF
+ Values:
+ - Value: 0x1
+ BlockData: [ 0x50 ]
+ - Value: 0x2
+ BlockData: [ 0x10, 0x2A ]
+ - AbbrCode: 0x0
- AbbrCode: 0xE
Values:
+ - Value: 0x252
+ - Value: 0x78
+ - AbbrCode: 0xF
+ Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x2
BlockData: [ 0x8F, 0xF ]
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0xF
+ - AbbrCode: 0x10
Values:
- Value: 0x5
- - Value: 0x8A
+ - Value: 0x12B
- Value: 0x1
- Value: 0x1
- - Value: 0x1A
- - AbbrCode: 0x10
+ - Value: 0x1D
+ - AbbrCode: 0x11
Values:
- - Value: 0x1EA
+ - Value: 0x227
- Value: 0x0
- - AbbrCode: 0x11
+ - AbbrCode: 0x12
Values:
- - Value: 0x8A
+ - Value: 0x12B
- Value: 0x1
- - Value: 0x1B
+ - Value: 0x1E
- Value: 0x1
- Value: 0x1
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x12
+ - AbbrCode: 0x13
Values:
- - Value: 0x210
+ - Value: 0x24D
- Value: 0x1
- - AbbrCode: 0x13
+ - AbbrCode: 0x14
Values:
- - Value: 0x20B
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0xF
+ - AbbrCode: 0x10
Values:
- Value: 0x5
- - Value: 0x97
+ - Value: 0x138
- Value: 0x1
- Value: 0x1
- - Value: 0x16
- - AbbrCode: 0x14
+ - Value: 0x19
+ - AbbrCode: 0x15
Values:
- - Value: 0x97
+ - Value: 0x138
- Value: 0x1
- - Value: 0x17
+ - Value: 0x1A
- Value: 0x1
- Value: 0x1
- Value: 0x1
- - AbbrCode: 0x12
+ - AbbrCode: 0x13
Values:
- - Value: 0x206
+ - Value: 0x243
- Value: 0x1
- - AbbrCode: 0x13
+ - AbbrCode: 0x14
Values:
- - Value: 0x20B
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0x15
+ - AbbrCode: 0x16
Values:
- - Value: 0x1EA
- - AbbrCode: 0x15
+ - Value: 0x227
+ - AbbrCode: 0x16
Values:
- Value: 0x4B
- - AbbrCode: 0x15
- Values:
- - Value: 0x1C8
- AbbrCode: 0x16
Values:
- - Value: 0x78
+ - Value: 0x205
+ - AbbrCode: 0x17
+ Values:
+ - Value: 0x8C
- Value: 0x4
- Value: 0x1
- - Value: 0xB7
+ - Value: 0xD3
- Value: 0x1
BlockData: [ 0x6F ]
- - Value: 0x234
+ - Value: 0x271
- Value: 0x1
- - Value: 0xA3
- - Value: 0x1D7
- - AbbrCode: 0x17
+ - Value: 0x144
+ - Value: 0x214
+ - AbbrCode: 0x18
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- - Value: 0xE8
- - Value: 0x2A8
+ - Value: 0x189
+ - Value: 0x2E5
- Value: 0x1
- - AbbrCode: 0x18
+ - AbbrCode: 0x19
Values:
- Value: 0x1
BlockData: [ 0x51 ]
- - Value: 0xED
+ - Value: 0x18E
- Value: 0x1
- - Value: 0x1B
- - Value: 0x20B
- - AbbrCode: 0x19
+ - Value: 0x1E
+ - Value: 0x248
+ - AbbrCode: 0x1A
Values:
- - Value: 0x269
+ - Value: 0x2A6
- Value: 0x1
- - Value: 0x78
- - AbbrCode: 0xE
+ - Value: 0x8C
+ - AbbrCode: 0xF
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- Value: 0x3
BlockData: [ 0xA3, 0x1, 0x50 ]
- - AbbrCode: 0xE
+ - AbbrCode: 0xF
Values:
- Value: 0x1
BlockData: [ 0x51 ]
@@ -1342,45 +1451,45 @@ DWARF:
BlockData: [ 0xA3, 0x1, 0x51 ]
- AbbrCode: 0x0
- AbbrCode: 0x0
- - AbbrCode: 0x16
+ - AbbrCode: 0x17
Values:
- - Value: 0x7C
+ - Value: 0x90
- Value: 0x4
- Value: 0x1
- - Value: 0xCB
+ - Value: 0xE7
- Value: 0x1
BlockData: [ 0x6F ]
- - Value: 0x288
+ - Value: 0x2C5
- Value: 0x1
- - Value: 0xBB
- - Value: 0x1D7
- - AbbrCode: 0x17
+ - Value: 0x15C
+ - Value: 0x214
+ - AbbrCode: 0x18
Values:
- Value: 0x1
BlockData: [ 0x50 ]
- - Value: 0xE8
- - Value: 0x2A8
+ - Value: 0x189
+ - Value: 0x2E5
- Value: 0x1
- - AbbrCode: 0x18
+ - AbbrCode: 0x19
Values:
- Value: 0x1
BlockData: [ 0x51 ]
- - Value: 0xED
+ - Value: 0x18E
- Value: 0x1
- - Value: 0x1B
- - Value: 0x20B
+ - Value: 0x1E
+ - Value: 0x248
- AbbrCode: 0x0
- AbbrCode: 0x6
Values:
- - Value: 0xD3
+ - Value: 0x174
- Value: 0x5
- Value: 0x4
- - AbbrCode: 0x15
+ - AbbrCode: 0x16
Values:
- - Value: 0x1C8
+ - Value: 0x205
- AbbrCode: 0x0
debug_line:
- - Length: 221
+ - Length: 249
Version: 4
PrologueLength: 42
MinInstLength: 1
@@ -1397,17 +1506,17 @@ DWARF:
Length: 0
Opcodes:
- Opcode: DW_LNS_set_column
- Data: 14
+ Data: 10
- Opcode: DW_LNS_set_prologue_end
Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 0
- - Opcode: 0x16
+ - Opcode: 0x14
Data: 0
- Opcode: DW_LNS_set_column
- Data: 5
+ Data: 3
- Opcode: DW_LNS_negate_stmt
Data: 0
- Opcode: 0x4A
@@ -1424,7 +1533,7 @@ DWARF:
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 8
- - Opcode: 0x1A
+ - Opcode: 0x19
Data: 0
- Opcode: DW_LNS_set_column
Data: 5
@@ -1445,7 +1554,7 @@ DWARF:
SubOpcode: DW_LNE_set_address
Data: 16
- Opcode: DW_LNS_advance_line
- SData: 13
+ SData: 11
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1460,7 +1569,7 @@ DWARF:
SubOpcode: DW_LNE_end_sequence
Data: 0
- Opcode: DW_LNS_set_column
- Data: 20
+ Data: 14
- Opcode: DW_LNS_set_prologue_end
Data: 0
- Opcode: DW_LNS_extended_op
@@ -1468,24 +1577,47 @@ DWARF:
SubOpcode: DW_LNE_set_address
Data: 24
- Opcode: DW_LNS_advance_line
- SData: 17
+ SData: 16
Data: 0
- Opcode: DW_LNS_copy
Data: 0
- Opcode: DW_LNS_set_column
Data: 5
- - Opcode: 0x4B
+ - Opcode: DW_LNS_negate_stmt
+ Data: 0
+ - Opcode: 0x4A
Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 1
SubOpcode: DW_LNE_end_sequence
Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 20
+ - Opcode: DW_LNS_set_prologue_end
+ Data: 0
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
Data: 32
- Opcode: DW_LNS_advance_line
- SData: 29
+ SData: 20
+ Data: 0
+ - Opcode: DW_LNS_copy
+ Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 5
+ - Opcode: 0x4B
+ Data: 0
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 1
+ SubOpcode: DW_LNE_end_sequence
+ Data: 0
+ - Opcode: DW_LNS_extended_op
+ ExtLen: 9
+ SubOpcode: DW_LNE_set_address
+ Data: 40
+ - Opcode: DW_LNS_advance_line
+ SData: 32
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1509,9 +1641,15 @@ DWARF:
Data: 0
- Opcode: 0x4B
Data: 0
+ - Opcode: 0xBB
+ Data: 0
+ - Opcode: DW_LNS_set_column
+ Data: 9
+ - Opcode: 0x81
+ Data: 0
- Opcode: DW_LNS_set_column
Data: 18
- - Opcode: 0xBB
+ - Opcode: 0x4C
Data: 0
- Opcode: DW_LNS_set_column
Data: 9
@@ -1534,9 +1672,9 @@ DWARF:
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
- Data: 120
+ Data: 140
- Opcode: DW_LNS_advance_line
- SData: 26
+ SData: 29
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1551,9 +1689,9 @@ DWARF:
- Opcode: DW_LNS_extended_op
ExtLen: 9
SubOpcode: DW_LNE_set_address
- Data: 124
+ Data: 144
- Opcode: DW_LNS_advance_line
- SData: 26
+ SData: 29
Data: 0
- Opcode: DW_LNS_copy
Data: 0
@@ -1604,7 +1742,7 @@ LoadCommands:
- sectname: __text
segname: __TEXT
addr: 0x1000002F0
- size: 112
+ size: 132
offset: 0x2F0
align: 2
reloff: 0x0
@@ -1613,12 +1751,12 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
reserved3: 0x0
- content: 00580051C0035FD600100011C0035FD6FFC300D1F44F01A9FD7B02A9FD83009160008052F7FFFF97F30300AA20058052F6FFFF971400130B60018052F1FFFF97F30300AA610100101F2003D5E03F0091060000948002130BFD7B42A9F44F41A9FFC30091C0035FD601000014C0035FD6
+ content: 00040011C0035FD600580051C0035FD600100011C0035FD6FFC300D1F44F01A9FD7B02A9FD83009160008052F7FFFF97F30300AA20058052F6FFFF971400130B60018052F1FFFF97F30300AA40058052ECFFFF977302000B610100101F2003D5E03F0091060000948002130BFD7B42A9F44F41A9FFC30091C0035FD601000014C0035FD6
- sectname: __cstring
segname: __TEXT
- addr: 0x100000360
+ addr: 0x100000374
size: 5
- offset: 0x360
+ offset: 0x374
align: 0
reloff: 0x0
nreloc: 0
@@ -1631,9 +1769,9 @@ LoadCommands:
cmdsize: 72
segname: __LINKEDIT
vmaddr: 4294983680
- vmsize: 960
+ vmsize: 1040
fileoff: 16384
- filesize: 960
+ filesize: 1040
maxprot: 1
initprot: 1
nsects: 0
@@ -1649,20 +1787,20 @@ LoadCommands:
lazy_bind_off: 0
lazy_bind_size: 0
export_off: 16384
- export_size: 96
+ export_size: 112
- cmd: LC_SYMTAB
cmdsize: 24
- symoff: 16488
- nsyms: 22
- stroff: 16840
- strsize: 192
+ symoff: 16504
+ nsyms: 25
+ stroff: 16904
+ strsize: 208
- cmd: LC_DYSYMTAB
cmdsize: 80
ilocalsym: 0
- nlocalsym: 17
- iextdefsym: 17
- nextdefsym: 5
- iundefsym: 22
+ nlocalsym: 19
+ iextdefsym: 19
+ nextdefsym: 6
+ iundefsym: 25
nundefsym: 0
tocoff: 0
ntoc: 0
@@ -1683,7 +1821,7 @@ LoadCommands:
ZeroPadBytes: 7
- cmd: LC_UUID
cmdsize: 24
- uuid: 4C4C4480-5555-3144-A138-E5DA50CC68DB
+ uuid: 4C4C443F-5555-3144-A15F-DE084AB2A15B
- cmd: LC_BUILD_VERSION
cmdsize: 32
platform: 1
@@ -1692,22 +1830,22 @@ LoadCommands:
ntools: 1
Tools:
- tool: 4
- version: 1376256
+ version: 1245445
- cmd: LC_MAIN
cmdsize: 24
- entryoff: 768
+ entryoff: 776
stacksize: 0
- cmd: LC_FUNCTION_STARTS
cmdsize: 16
- dataoff: 16480
+ dataoff: 16496
datasize: 8
- cmd: LC_DATA_IN_CODE
cmdsize: 16
- dataoff: 16488
+ dataoff: 16504
datasize: 0
- cmd: LC_CODE_SIGNATURE
cmdsize: 16
- dataoff: 17040
+ dataoff: 17120
datasize: 304
LinkEditData:
ExportTrie:
@@ -1738,7 +1876,7 @@ LinkEditData:
NodeOffset: 47
Name: main
Flags: 0x0
- Address: 0x300
+ Address: 0x308
Other: 0x0
ImportName: ''
- TerminalSize: 0
@@ -1749,8 +1887,15 @@ LinkEditData:
Other: 0x0
ImportName: ''
Children:
+ - TerminalSize: 3
+ NodeOffset: 80
+ Name: 1_copy1
+ Flags: 0x0
+ Address: 0x2F0
+ Other: 0x0
+ ImportName: ''
- TerminalSize: 0
- NodeOffset: 71
+ NodeOffset: 85
Name: 2_copy
Flags: 0x0
Address: 0x0
@@ -1758,52 +1903,52 @@ LinkEditData:
ImportName: ''
Children:
- TerminalSize: 3
- NodeOffset: 79
+ NodeOffset: 93
Name: '1'
Flags: 0x0
- Address: 0x2F0
+ Address: 0x2F8
Other: 0x0
ImportName: ''
- TerminalSize: 3
- NodeOffset: 84
+ NodeOffset: 98
Name: '2'
Flags: 0x0
- Address: 0x2F0
+ Address: 0x2F8
Other: 0x0
ImportName: ''
- TerminalSize: 3
- NodeOffset: 89
+ NodeOffset: 103
Name: 3_copy2
Flags: 0x0
- Address: 0x2F8
+ Address: 0x300
Other: 0x0
ImportName: ''
NameList:
- - n_strx: 129
+ - n_strx: 146
n_type: 0x64
n_sect: 0
n_desc: 0
n_value: 0
- - n_strx: 170
+ - n_strx: 187
n_type: 0x66
n_sect: 0
n_desc: 1
n_value: 0
- - n_strx: 59
+ - n_strx: 76
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968152
+ n_value: 4294968172
- n_strx: 1
n_type: 0x24
n_sect: 0
n_desc: 0
n_value: 4
- - n_strx: 84
+ - n_strx: 101
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968156
+ n_value: 4294968176
- n_strx: 1
n_type: 0x24
n_sect: 0
@@ -1813,12 +1958,12 @@ LinkEditData:
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968064
+ n_value: 4294968072
- n_strx: 1
n_type: 0x24
n_sect: 0
n_desc: 0
- n_value: 88
+ n_value: 100
- n_strx: 8
n_type: 0x24
n_sect: 1
@@ -1843,7 +1988,17 @@ LinkEditData:
n_type: 0x24
n_sect: 1
n_desc: 0
- n_value: 4294968048
+ n_value: 4294968064
+ - n_strx: 1
+ n_type: 0x24
+ n_sect: 0
+ n_desc: 0
+ n_value: 8
+ - n_strx: 59
+ n_type: 0x24
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968056
- n_strx: 1
n_type: 0x24
n_sect: 0
@@ -1854,21 +2009,21 @@ LinkEditData:
n_sect: 1
n_desc: 0
n_value: 0
- - n_strx: 59
+ - n_strx: 76
n_type: 0x1E
n_sect: 1
n_desc: 0
- n_value: 4294968152
- - n_strx: 84
+ n_value: 4294968172
+ - n_strx: 101
n_type: 0x1E
n_sect: 1
n_desc: 0
- n_value: 4294968156
+ n_value: 4294968176
- n_strx: 2
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 4294968064
+ n_value: 4294968072
- n_strx: 8
n_type: 0xF
n_sect: 1
@@ -1883,8 +2038,13 @@ LinkEditData:
n_type: 0xF
n_sect: 1
n_desc: 0
- n_value: 4294968048
- - n_strx: 109
+ n_value: 4294968064
+ - n_strx: 59
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294968056
+ - n_strx: 126
n_type: 0xF
n_sect: 1
n_desc: 16
@@ -1892,6 +2052,7 @@ LinkEditData:
StringTable:
- ' '
- _main
+ - _function1_copy1
- _function2_copy1
- _function3_copy2
- _function2_copy2
@@ -1904,6 +2065,5 @@ LinkEditData:
- ''
- ''
- ''
- - ''
- FunctionStarts: [ 0x2F0, 0x2F8, 0x300, 0x358, 0x35C ]
+ FunctionStarts: [ 0x2F0, 0x2F8, 0x300, 0x308, 0x36C, 0x370 ]
...
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
index f69c0b1..fc530a4 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
index ed679dc..d492076 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic-histogram.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
index 14cbfeb..8810ee1 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
index c3ac49e..6943c18 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe
new file mode 100755
index 0000000..14cbfeb
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw
new file mode 100644
index 0000000..c3ac49e
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/Inputs/basic_v4.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
index 1b4db88..4ab8040 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
index e959e76..c6aec8d 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/buildid.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
index 2822f2f..5af6c81 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
index 05deb2e..8958af9 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/inline.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
index 22c6136..e9ec22c 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
index 364aa1c..3952768 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/multi.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
index 34db7e7..e50f663 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
index 7a7d3a6..df6fcb1 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/padding-histogram.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
index f7d1723..63eea44 100755
--- a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
+++ b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofexe
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
index 0920028..b6a733a 100644
--- a/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
+++ b/llvm/test/tools/llvm-profdata/Inputs/pic.memprofraw
Binary files differ
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test b/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
index 3d30a62..ce534db 100644
--- a/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
+++ b/llvm/test/tools/llvm-profdata/memprof-basic-histogram.test
@@ -7,7 +7,7 @@ We expect 5 MIBs, each with different AccessHistogramValues.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 5
CHECK-NEXT: NumAllocFunctions: 3
@@ -241,4 +241,4 @@ CHECK-NEXT: MinLifetimeAccessDensity: 56000
CHECK-NEXT: MaxLifetimeAccessDensity: 56000
CHECK-NEXT: AccessHistogramSize: 8
CHECK-NEXT: AccessHistogram: {{[0-9]+}}
-CHECK-NEXT: AccessHistogramValues: 168 147 126 105 84 63 42 21
\ No newline at end of file
+CHECK-NEXT: AccessHistogramValues: 168 147 126 105 84 63 42 21
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic.test b/llvm/test/tools/llvm-profdata/memprof-basic.test
index e15df50..81550eb 100644
--- a/llvm/test/tools/llvm-profdata/memprof-basic.test
+++ b/llvm/test/tools/llvm-profdata/memprof-basic.test
@@ -8,7 +8,7 @@ additional allocations which do not originate from the main binary are pruned.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -96,4 +96,4 @@ CHECK-NEXT: TotalLifetimeAccessDensity: 20000
CHECK-NEXT: MinLifetimeAccessDensity: 20000
CHECK-NEXT: MaxLifetimeAccessDensity: 20000
CHECK-NEXT: AccessHistogramSize: 0
-CHECK-NEXT: AccessHistogram: 0
\ No newline at end of file
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/llvm-profdata/memprof-basic_v4.test b/llvm/test/tools/llvm-profdata/memprof-basic_v4.test
new file mode 100644
index 0000000..79d4fe2
--- /dev/null
+++ b/llvm/test/tools/llvm-profdata/memprof-basic_v4.test
@@ -0,0 +1,102 @@
+REQUIRES: x86_64-linux
+
+This is a copy of memprof-basic.test with slight changes to check that we can still read v4 of memprofraw.
+
+Inputs cannot and should not be updated.
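+The checked-in raw profile and binary deliberately stay in the old v4 format; regenerating them with a newer runtime would defeat this backward-compatibility check.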
+
+RUN: llvm-profdata show --memory %p/Inputs/basic_v4.memprofraw --profiled-binary %p/Inputs/basic_v4.memprofexe -o - | FileCheck %s
+
+We expect 2 MIB entries, one for each of the malloc calls in the program. Any
+additional allocations which do not originate from the main binary are pruned.
+
+CHECK: MemprofProfile:
+CHECK-NEXT: Summary:
+CHECK-NEXT: Version: 4
+CHECK-NEXT: NumSegments: {{[0-9]+}}
+CHECK-NEXT: NumMibInfo: 2
+CHECK-NEXT: NumAllocFunctions: 1
+CHECK-NEXT: NumStackOffsets: 2
+CHECK-NEXT: Segments:
+CHECK-NEXT: -
+CHECK-NEXT: BuildId: {{[[:xdigit:]]+}}
+CHECK-NEXT: Start: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: End: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: Offset: 0x{{[[:xdigit:]]+}}
+CHECK-NEXT: -
+
+CHECK: Records:
+CHECK-NEXT: -
+CHECK-NEXT: FunctionGUID: {{[0-9]+}}
+CHECK-NEXT: AllocSites:
+CHECK-NEXT: -
+CHECK-NEXT: Callstack:
+CHECK-NEXT: -
+CHECK-NEXT: Function: {{[0-9]+}}
+CHECK-NEXT: SymbolName: main
+CHECK-NEXT: LineOffset: 1
+CHECK-NEXT: Column: 21
+CHECK-NEXT: Inline: 0
+CHECK-NEXT: MemInfoBlock:
+CHECK-NEXT: AllocCount: 1
+CHECK-NEXT: TotalAccessCount: 2
+CHECK-NEXT: MinAccessCount: 2
+CHECK-NEXT: MaxAccessCount: 2
+CHECK-NEXT: TotalSize: 10
+CHECK-NEXT: MinSize: 10
+CHECK-NEXT: MaxSize: 10
+CHECK-NEXT: AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT: DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT: TotalLifetime: 0
+CHECK-NEXT: MinLifetime: 0
+CHECK-NEXT: MaxLifetime: 0
+CHECK-NEXT: AllocCpuId: {{[0-9]+}}
+CHECK-NEXT: DeallocCpuId: {{[0-9]+}}
+CHECK-NEXT: NumMigratedCpu: 0
+CHECK-NEXT: NumLifetimeOverlaps: 0
+CHECK-NEXT: NumSameAllocCpu: 0
+CHECK-NEXT: NumSameDeallocCpu: 0
+CHECK-NEXT: DataTypeId: {{[0-9]+}}
+CHECK-NEXT: TotalAccessDensity: 20
+CHECK-NEXT: MinAccessDensity: 20
+CHECK-NEXT: MaxAccessDensity: 20
+CHECK-NEXT: TotalLifetimeAccessDensity: 20000
+CHECK-NEXT: MinLifetimeAccessDensity: 20000
+CHECK-NEXT: MaxLifetimeAccessDensity: 20000
+CHECK-NEXT: AccessHistogramSize: 0
+CHECK-NEXT: AccessHistogram: 0
+CHECK-NEXT: -
+CHECK-NEXT: Callstack:
+CHECK-NEXT: -
+CHECK-NEXT: Function: {{[0-9]+}}
+CHECK-NEXT: SymbolName: main
+CHECK-NEXT: LineOffset: 4
+CHECK-NEXT: Column: 15
+CHECK-NEXT: Inline: 0
+CHECK-NEXT: MemInfoBlock:
+CHECK-NEXT: AllocCount: 1
+CHECK-NEXT: TotalAccessCount: 2
+CHECK-NEXT: MinAccessCount: 2
+CHECK-NEXT: MaxAccessCount: 2
+CHECK-NEXT: TotalSize: 10
+CHECK-NEXT: MinSize: 10
+CHECK-NEXT: MaxSize: 10
+CHECK-NEXT: AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT: DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT: TotalLifetime: 0
+CHECK-NEXT: MinLifetime: 0
+CHECK-NEXT: MaxLifetime: 0
+CHECK-NEXT: AllocCpuId: {{[0-9]+}}
+CHECK-NEXT: DeallocCpuId: {{[0-9]+}}
+CHECK-NEXT: NumMigratedCpu: 0
+CHECK-NEXT: NumLifetimeOverlaps: 0
+CHECK-NEXT: NumSameAllocCpu: 0
+CHECK-NEXT: NumSameDeallocCpu: 0
+CHECK-NEXT: DataTypeId: {{[0-9]+}}
+CHECK-NEXT: TotalAccessDensity: 20
+CHECK-NEXT: MinAccessDensity: 20
+CHECK-NEXT: MaxAccessDensity: 20
+CHECK-NEXT: TotalLifetimeAccessDensity: 20000
+CHECK-NEXT: MinLifetimeAccessDensity: 20000
+CHECK-NEXT: MaxLifetimeAccessDensity: 20000
+CHECK-NEXT: AccessHistogramSize: 0
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/llvm-profdata/memprof-inline.test b/llvm/test/tools/llvm-profdata/memprof-inline.test
index 79ce2ad..4a3f620 100644
--- a/llvm/test/tools/llvm-profdata/memprof-inline.test
+++ b/llvm/test/tools/llvm-profdata/memprof-inline.test
@@ -5,7 +5,7 @@ RUN: llvm-profdata show --memory %p/Inputs/inline.memprofraw --profiled-binary %
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 2
diff --git a/llvm/test/tools/llvm-profdata/memprof-multi.test b/llvm/test/tools/llvm-profdata/memprof-multi.test
index 6243982..35f94df 100644
--- a/llvm/test/tools/llvm-profdata/memprof-multi.test
+++ b/llvm/test/tools/llvm-profdata/memprof-multi.test
@@ -7,7 +7,7 @@ We expect 2 MIB entries, 1 each for the malloc calls in the program.
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
diff --git a/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test b/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
index 4ba58e3..2d0346e 100644
--- a/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
+++ b/llvm/test/tools/llvm-profdata/memprof-padding-histogram.test
@@ -7,7 +7,7 @@ We expect 2 different MIBs with histogram values. This test is to make sure we p
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -21,79 +21,79 @@ CHECK-NEXT: Offset: 0x{{[[:xdigit:]]+}}
CHECK-NEXT: -
CHECK: Records:
-CHEC-NEXT FunctionGUID: {{[0-9]+}}
-CHEC-NEXT AllocSites:
-CHEC-NEXT -
-CHEC-NEXT Callstack:
-CHEC-NEXT -
-CHEC-NEXT Function: {{[0-9]+}}
-CHEC-NEXT SymbolName: main
-CHEC-NEXT LineOffset: 3
-CHEC-NEXT Column: 10
-CHEC-NEXT Inline: 0
-CHEC-NEXT MemInfoBlock:
-CHEC-NEXT AllocCount: 1
-CHEC-NEXT TotalAccessCount: 5
-CHEC-NEXT MinAccessCount: 5
-CHEC-NEXT MaxAccessCount: 5
-CHEC-NEXT TotalSize: 24
-CHEC-NEXT MinSize: 24
-CHEC-NEXT MaxSize: 24
-CHEC-NEXT AllocTimestamp: {{[0-9]+}}
-CHEC-NEXT DeallocTimestamp: {{[0-9]+}}
-CHEC-NEXT TotalLifetime: 0
-CHEC-NEXT MinLifetime: 0
-CHEC-NEXT MaxLifetime: 0
-CHEC-NEXT AllocCpuId: 11
-CHEC-NEXT DeallocCpuId: 11
-CHEC-NEXT NumMigratedCpu: 0
-CHEC-NEXT NumLifetimeOverlaps: 0
-CHEC-NEXT NumSameAllocCpu: 0
-CHEC-NEXT NumSameDeallocCpu: 0
-CHEC-NEXT DataTypeId: 0
-CHEC-NEXT TotalAccessDensity: 20
-CHEC-NEXT MinAccessDensity: 20
-CHEC-NEXT MaxAccessDensity: 20
-CHEC-NEXT TotalLifetimeAccessDensity: 20000
-CHEC-NEXT MinLifetimeAccessDensity: 20000
-CHEC-NEXT MaxLifetimeAccessDensity: 20000
-CHEC-NEXT AccessHistogramSize: 3
-CHEC-NEXT AccessHistogram: {{[0-9]+}}
-CHEC-NEXT AccessHistogramValues: -2 -1 -2
-CHEC-NEXT -
-CHEC-NEXT Callstack:
-CHEC-NEXT -
-CHEC-NEXT Function: {{[0-9]+}}
-CHEC-NEXT SymbolName: main
-CHEC-NEXT LineOffset: 10
-CHEC-NEXT Column: 10
-CHEC-NEXT Inline: 0
-CHEC-NEXT MemInfoBlock:
-CHEC-NEXT AllocCount: 1
-CHEC-NEXT TotalAccessCount: 4
-CHEC-NEXT MinAccessCount: 4
-CHEC-NEXT MaxAccessCount: 4
-CHEC-NEXT TotalSize: 48
-CHEC-NEXT MinSize: 48
-CHEC-NEXT MaxSize: 48
-CHEC-NEXT AllocTimestamp: {{[0-9]+}}
-CHEC-NEXT DeallocTimestamp: {{[0-9]+}}
-CHEC-NEXT TotalLifetime: 0
-CHEC-NEXT MinLifetime: 0
-CHEC-NEXT MaxLifetime: 0
-CHEC-NEXT AllocCpuId: 11
-CHEC-NEXT DeallocCpuId: 11
-CHEC-NEXT NumMigratedCpu: 0
-CHEC-NEXT NumLifetimeOverlaps: 0
-CHEC-NEXT NumSameAllocCpu: 0
-CHEC-NEXT NumSameDeallocCpu: 0
-CHEC-NEXT DataTypeId: 0
-CHEC-NEXT TotalAccessDensity: 8
-CHEC-NEXT MinAccessDensity: 8
-CHEC-NEXT MaxAccessDensity: 8
-CHEC-NEXT TotalLifetimeAccessDensity: 8000
-CHEC-NEXT MinLifetimeAccessDensity: 8000
-CHEC-NEXT MaxLifetimeAccessDensity: 8000
-CHEC-NEXT AccessHistogramSize: 6
-CHEC-NEXT AccessHistogram: {{[0-9]+}}
-CHEC-NEXT AccessHistogramValues: -2 -0 -0 -0 -1 -1
\ No newline at end of file
+CHECK-NEXT FunctionGUID: {{[0-9]+}}
+CHECK-NEXT AllocSites:
+CHECK-NEXT -
+CHECK-NEXT Callstack:
+CHECK-NEXT -
+CHECK-NEXT Function: {{[0-9]+}}
+CHECK-NEXT SymbolName: main
+CHECK-NEXT LineOffset: 3
+CHECK-NEXT Column: 10
+CHECK-NEXT Inline: 0
+CHECK-NEXT MemInfoBlock:
+CHECK-NEXT AllocCount: 1
+CHECK-NEXT TotalAccessCount: 5
+CHECK-NEXT MinAccessCount: 5
+CHECK-NEXT MaxAccessCount: 5
+CHECK-NEXT TotalSize: 24
+CHECK-NEXT MinSize: 24
+CHECK-NEXT MaxSize: 24
+CHECK-NEXT AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT TotalLifetime: 0
+CHECK-NEXT MinLifetime: 0
+CHECK-NEXT MaxLifetime: 0
+CHECK-NEXT AllocCpuId: 11
+CHECK-NEXT DeallocCpuId: 11
+CHECK-NEXT NumMigratedCpu: 0
+CHECK-NEXT NumLifetimeOverlaps: 0
+CHECK-NEXT NumSameAllocCpu: 0
+CHECK-NEXT NumSameDeallocCpu: 0
+CHECK-NEXT DataTypeId: 0
+CHECK-NEXT TotalAccessDensity: 20
+CHECK-NEXT MinAccessDensity: 20
+CHECK-NEXT MaxAccessDensity: 20
+CHECK-NEXT TotalLifetimeAccessDensity: 20000
+CHECK-NEXT MinLifetimeAccessDensity: 20000
+CHECK-NEXT MaxLifetimeAccessDensity: 20000
+CHECK-NEXT AccessHistogramSize: 3
+CHECK-NEXT AccessHistogram: {{[0-9]+}}
+CHECK-NEXT AccessHistogramValues: -2 -1 -2
+CHECK-NEXT -
+CHECK-NEXT Callstack:
+CHECK-NEXT -
+CHECK-NEXT Function: {{[0-9]+}}
+CHECK-NEXT SymbolName: main
+CHECK-NEXT LineOffset: 10
+CHECK-NEXT Column: 10
+CHECK-NEXT Inline: 0
+CHECK-NEXT MemInfoBlock:
+CHECK-NEXT AllocCount: 1
+CHECK-NEXT TotalAccessCount: 4
+CHECK-NEXT MinAccessCount: 4
+CHECK-NEXT MaxAccessCount: 4
+CHECK-NEXT TotalSize: 48
+CHECK-NEXT MinSize: 48
+CHECK-NEXT MaxSize: 48
+CHECK-NEXT AllocTimestamp: {{[0-9]+}}
+CHECK-NEXT DeallocTimestamp: {{[0-9]+}}
+CHECK-NEXT TotalLifetime: 0
+CHECK-NEXT MinLifetime: 0
+CHECK-NEXT MaxLifetime: 0
+CHECK-NEXT AllocCpuId: 11
+CHECK-NEXT DeallocCpuId: 11
+CHECK-NEXT NumMigratedCpu: 0
+CHECK-NEXT NumLifetimeOverlaps: 0
+CHECK-NEXT NumSameAllocCpu: 0
+CHECK-NEXT NumSameDeallocCpu: 0
+CHECK-NEXT DataTypeId: 0
+CHECK-NEXT TotalAccessDensity: 8
+CHECK-NEXT MinAccessDensity: 8
+CHECK-NEXT MaxAccessDensity: 8
+CHECK-NEXT TotalLifetimeAccessDensity: 8000
+CHECK-NEXT MinLifetimeAccessDensity: 8000
+CHECK-NEXT MaxLifetimeAccessDensity: 8000
+CHECK-NEXT AccessHistogramSize: 6
+CHECK-NEXT AccessHistogram: {{[0-9]+}}
+CHECK-NEXT AccessHistogramValues: -2 -0 -0 -0 -1 -1
diff --git a/llvm/test/tools/llvm-profdata/memprof-pic.test b/llvm/test/tools/llvm-profdata/memprof-pic.test
index 78d2c5c..66203ef 100644
--- a/llvm/test/tools/llvm-profdata/memprof-pic.test
+++ b/llvm/test/tools/llvm-profdata/memprof-pic.test
@@ -11,7 +11,7 @@ RUN: llvm-profdata show --memory %p/Inputs/pic.memprofraw --profiled-binary %p/I
CHECK: MemprofProfile:
CHECK-NEXT: Summary:
-CHECK-NEXT: Version: 4
+CHECK-NEXT: Version: 5
CHECK-NEXT: NumSegments: {{[0-9]+}}
CHECK-NEXT: NumMibInfo: 2
CHECK-NEXT: NumAllocFunctions: 1
@@ -100,4 +100,4 @@ CHECK-NEXT: TotalLifetimeAccessDensity: 20000
CHECK-NEXT: MinLifetimeAccessDensity: 20000
CHECK-NEXT: MaxLifetimeAccessDensity: 20000
CHECK-NEXT: AccessHistogramSize: 0
-CHECK-NEXT: AccessHistogram: 0
\ No newline at end of file
+CHECK-NEXT: AccessHistogram: 0
diff --git a/llvm/test/tools/obj2yaml/ELF/eflags.yaml b/llvm/test/tools/obj2yaml/ELF/eflags.yaml
new file mode 100644
index 0000000..da16a62
--- /dev/null
+++ b/llvm/test/tools/obj2yaml/ELF/eflags.yaml
@@ -0,0 +1,31 @@
+## Check how obj2yaml dumps e_flags field.
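+## The [[FLAGS]] placeholder is filled in via yaml2obj's -D option in the RUN lines below.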
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2MSB
+ Type: ET_EXEC
+ Machine: EM_SPARC32PLUS
+ Flags: [ [[FLAGS]] ]
+
+# RUN: yaml2obj -DFLAGS="EF_SPARC_32PLUS " %s -o %t2
+# RUN: obj2yaml %t2 | FileCheck %s --check-prefix=FLAG
+
+# FLAG: --- !ELF
+# FLAG-NEXT: FileHeader:
+# FLAG-NEXT: Class: ELFCLASS64
+# FLAG-NEXT: Data: ELFDATA2MSB
+# FLAG-NEXT: Type: ET_EXEC
+# FLAG-NEXT: Machine: EM_SPARC32PLUS
+# FLAG-NEXT: Flags: [ EF_SPARC_32PLUS ]
+
+# RUN: yaml2obj -DFLAGS="EF_SPARC_HAL_R1 " %s -o %t3
+# RUN: obj2yaml %t3 | FileCheck %s --check-prefix=FLAG2
+
+# FLAG2: --- !ELF
+# FLAG2-NEXT: FileHeader:
+# FLAG2-NEXT: Class: ELFCLASS64
+# FLAG2-NEXT: Data: ELFDATA2MSB
+# FLAG2-NEXT: Type: ET_EXEC
+# FLAG2-NEXT: Machine: EM_SPARC32PLUS
+# FLAG2-NEXT: Flags: [ EF_SPARC_HAL_R1 ]
diff --git a/llvm/test/tools/yaml2obj/file-header-flags.yaml b/llvm/test/tools/yaml2obj/file-header-flags.yaml
new file mode 100644
index 0000000..baa101a
--- /dev/null
+++ b/llvm/test/tools/yaml2obj/file-header-flags.yaml
@@ -0,0 +1,25 @@
+## Test for FileHeader Flags.
+
+## When the FLAGS variable isn't defined, the e_flags value is 0.
+## Otherwise, it's the specified value.
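+## The <none> default in [[FLAGS=<none>]] makes yaml2obj omit the Flags key entirely when the variable is not set on the command line.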
+
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-readobj -h %t | FileCheck %s --check-prefixes=NO-FLAG
+
+# RUN: yaml2obj %s -o %t -DFLAGS=[EF_SPARC_32PLUS]
+# RUN: llvm-readobj -h %t | FileCheck %s --check-prefixes=FLAG
+
+!ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_EXEC
+ Machine: EM_SPARC32PLUS
+ Flags: [[FLAGS=<none>]]
+
+# NO-FLAG: Flags [ (0x0)
+# NO-FLAG-NEXT: ]
+
+# FLAG: Flags [ (0x100)
+# FLAG-NEXT: EF_SPARC_32PLUS (0x100)
+# FLAG-NEXT: ]
diff --git a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
index 1659cfb..5dae6c0 100644
--- a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
+++ b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
@@ -30,11 +30,12 @@
#include <memory>
#include <string>
#include <vector>
-#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
+#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
#include <immintrin.h>
#include <intrin.h>
#endif
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
#include <float.h> // For _clearfp in ~X86SavedState().
#endif
@@ -654,7 +655,7 @@ namespace {
class X86SavedState : public ExegesisTarget::SavedState {
public:
X86SavedState() {
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
_fxsave64(FPState);
Eflags = __readeflags();
#elif defined(__GNUC__) && defined(__x86_64__)
@@ -668,7 +669,7 @@ public:
~X86SavedState() {
// Restoring the X87 state does not flush pending exceptions, make sure
// these exceptions are flushed now.
-#if defined(_MSC_VER) && defined(_M_X64)
+#if defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
_clearfp();
_fxrstor64(FPState);
__writeeflags(Eflags);
@@ -682,7 +683,7 @@ public:
}
private:
-#if defined(__x86_64__) || defined(_M_X64)
+#if defined(__x86_64__) || defined(_M_X64) && !defined(_M_ARM64EC)
alignas(16) char FPState[512];
uint64_t Eflags;
#endif
@@ -824,8 +825,9 @@ private:
// For now, only do the check if we see an Intel machine because
// the counter uses some intel-specific magic and it could
// be confused and think an AMD machine actually has LBR support.
-#if defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
- defined(_M_X64)
+#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)) && \
+ !defined(_M_ARM64EC)
using namespace sys::detail::x86;
if (getVendorSignature() == VendorSignatures::GENUINE_INTEL)
diff --git a/llvm/tools/llvm-readobj/ELFDumper.cpp b/llvm/tools/llvm-readobj/ELFDumper.cpp
index 94ce386..66153ad 100644
--- a/llvm/tools/llvm-readobj/ELFDumper.cpp
+++ b/llvm/tools/llvm-readobj/ELFDumper.cpp
@@ -1683,7 +1683,9 @@ const EnumEntry<unsigned> ElfHeaderNVPTXFlags[] = {
ENUM_ENT(EF_CUDA_SM75, "sm_75"), ENUM_ENT(EF_CUDA_SM80, "sm_80"),
ENUM_ENT(EF_CUDA_SM86, "sm_86"), ENUM_ENT(EF_CUDA_SM87, "sm_87"),
ENUM_ENT(EF_CUDA_SM89, "sm_89"), ENUM_ENT(EF_CUDA_SM90, "sm_90"),
- ENUM_ENT(EF_CUDA_SM100, "sm_100"), ENUM_ENT(EF_CUDA_SM120, "sm_120"),
+ ENUM_ENT(EF_CUDA_SM100, "sm_100"), ENUM_ENT(EF_CUDA_SM101, "sm_101"),
+ ENUM_ENT(EF_CUDA_SM103, "sm_103"), ENUM_ENT(EF_CUDA_SM120, "sm_120"),
+ ENUM_ENT(EF_CUDA_SM121, "sm_121"),
};
const EnumEntry<unsigned> ElfHeaderRISCVFlags[] = {
@@ -3659,8 +3661,10 @@ template <class ELFT> void GNUELFDumper<ELFT>::printFileHeaders() {
ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderXtensaFlags),
unsigned(ELF::EF_XTENSA_MACH));
else if (e.e_machine == EM_CUDA) {
- ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderNVPTXFlags),
- unsigned(ELF::EF_CUDA_SM));
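+    // The CUDA ABI version recorded in e_ident selects which SM flag mask applies.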
+ unsigned Mask = e.e_ident[ELF::EI_ABIVERSION] == ELF::ELFABIVERSION_CUDA_V1
+ ? ELF::EF_CUDA_SM
+ : ELF::EF_CUDA_SM_MASK;
+ ElfFlags = printFlags(e.e_flags, ArrayRef(ElfHeaderNVPTXFlags), Mask);
if (e.e_ident[ELF::EI_ABIVERSION] == ELF::ELFABIVERSION_CUDA_V1 &&
(e.e_flags & ELF::EF_CUDA_ACCELERATORS_V1))
ElfFlags += "a";
diff --git a/llvm/tools/obj2yaml/elf2yaml.cpp b/llvm/tools/obj2yaml/elf2yaml.cpp
index 53455b8..ab15553 100644
--- a/llvm/tools/obj2yaml/elf2yaml.cpp
+++ b/llvm/tools/obj2yaml/elf2yaml.cpp
@@ -281,7 +281,8 @@ template <class ELFT> Expected<ELFYAML::Object *> ELFDumper<ELFT>::dump() {
Y->Header.Type = Obj.getHeader().e_type;
if (Obj.getHeader().e_machine != 0)
Y->Header.Machine = ELFYAML::ELF_EM(Obj.getHeader().e_machine);
- Y->Header.Flags = Obj.getHeader().e_flags;
+ if (Obj.getHeader().e_flags != 0)
+ Y->Header.Flags = ELFYAML::ELF_EF(Obj.getHeader().e_flags);
Y->Header.Entry = Obj.getHeader().e_entry;
// Dump sections
diff --git a/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt b/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
index deabf11..d9da627 100644
--- a/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
+++ b/llvm/unittests/Analysis/InlineAdvisorPlugin/CMakeLists.txt
@@ -2,7 +2,7 @@
# libraries, but expects them to exist in the process loading the plugin. This
# doesn't work with DLLs on Windows (where a shared library can't have undefined
# references), so just skip this testcase on Windows.
-if ((NOT WIN32 OR LLVM_BUILD_LLVM_DYLIB) AND NOT CYGWIN)
+if ((NOT WIN32 AND NOT CYGWIN) OR LLVM_BUILD_LLVM_DYLIB)
unset(LLVM_LINK_COMPONENTS)
add_llvm_library(InlineAdvisorPlugin MODULE BUILDTREE_ONLY
InlineAdvisorPlugin.cpp
diff --git a/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt b/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
index 0b37ceb..941e18efc 100644
--- a/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
+++ b/llvm/unittests/Analysis/InlineOrderPlugin/CMakeLists.txt
@@ -2,7 +2,7 @@
# libraries, but expects them to exist in the process loading the plugin. This
# doesn't work with DLLs on Windows (where a shared library can't have undefined
# references), so just skip this testcase on Windows.
-if ((NOT WIN32 OR LLVM_BUILD_LLVM_DYLIB) AND NOT CYGWIN)
+if ((NOT WIN32 AND NOT CYGWIN) OR LLVM_BUILD_LLVM_DYLIB)
unset(LLVM_LINK_COMPONENTS)
add_llvm_library(InlineOrderPlugin MODULE BUILDTREE_ONLY
InlineOrderPlugin.cpp
diff --git a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
index 4e0bf38..16b9979 100644
--- a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
@@ -859,3 +859,35 @@ TEST_F(SelectionDAGPatternMatchTest, MatchZeroOneAllOnes) {
EXPECT_TRUE(sd_match(Vec, DAG.get(), m_AllOnes(true)));
}
}
+
+TEST_F(SelectionDAGPatternMatchTest, MatchSelectCCLike) {
+ using namespace SDPatternMatch;
+
+ SDValue LHS = DAG->getConstant(1, SDLoc(), MVT::i32);
+ SDValue RHS = DAG->getConstant(2, SDLoc(), MVT::i32);
+ SDValue TVal = DAG->getConstant(3, SDLoc(), MVT::i32);
+ SDValue FVal = DAG->getConstant(4, SDLoc(), MVT::i32);
+ SDValue Select = DAG->getNode(ISD::SELECT_CC, SDLoc(), MVT::i32, LHS, RHS,
+ TVal, FVal, DAG->getCondCode(ISD::SETLT));
+
+ ISD::CondCode CC = ISD::SETLT;
+ EXPECT_TRUE(sd_match(
+ Select, m_SelectCCLike(m_Specific(LHS), m_Specific(RHS), m_Specific(TVal),
+ m_Specific(FVal), m_CondCode(CC))));
+}
+
+TEST_F(SelectionDAGPatternMatchTest, MatchSelectCC) {
+ using namespace SDPatternMatch;
+
+ SDValue LHS = DAG->getConstant(1, SDLoc(), MVT::i32);
+ SDValue RHS = DAG->getConstant(2, SDLoc(), MVT::i32);
+ SDValue TVal = DAG->getConstant(3, SDLoc(), MVT::i32);
+ SDValue FVal = DAG->getConstant(4, SDLoc(), MVT::i32);
+ SDValue Select = DAG->getNode(ISD::SELECT_CC, SDLoc(), MVT::i32, LHS, RHS,
+ TVal, FVal, DAG->getCondCode(ISD::SETLT));
+
+ ISD::CondCode CC = ISD::SETLT;
+ EXPECT_TRUE(sd_match(Select, m_SelectCC(m_Specific(LHS), m_Specific(RHS),
+ m_Specific(TVal), m_Specific(FVal),
+ m_CondCode(CC))));
+}
diff --git a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
index 080f257..ec94083 100644
--- a/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/CoreAPIsTest.cpp
@@ -1575,7 +1575,7 @@ TEST_F(CoreAPIsStandardTest, TestLookupWithThreadedMaterialization) {
EXPECT_EQ(FooLookupResult.getFlags(), FooSym.getFlags())
<< "lookup returned incorrect flags";
- std::unique_lock Lock(WorkThreadsMutex);
+ std::unique_lock<std::mutex> Lock(WorkThreadsMutex);
// This works because every child thread that is allowed to use WorkThreads
// must either be in WorkThreads or its parent must be in WorkThreads.
while (!WorkThreads.empty()) {
diff --git a/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp b/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp
index 6ab659d..fea9eab 100644
--- a/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/MemoryMapperTest.cpp
@@ -81,7 +81,7 @@ TEST(MemoryMapperTest, InitializeDeinitialize) {
{
// Provide working memory
char *WA1 = Mapper->prepare(Mem1->Start, HW.size() + 1);
- std::strcpy(static_cast<char *>(WA1), HW.c_str());
+ std::strcpy(WA1, HW.c_str());
}
// A structure to be passed to initialize
@@ -106,7 +106,7 @@ TEST(MemoryMapperTest, InitializeDeinitialize) {
{
char *WA2 = Mapper->prepare(Mem1->Start + PageSize, HW.size() + 1);
- std::strcpy(static_cast<char *>(WA2), HW.c_str());
+ std::strcpy(WA2, HW.c_str());
}
MemoryMapper::AllocInfo Alloc2;
@@ -159,7 +159,7 @@ TEST(MemoryMapperTest, InitializeDeinitialize) {
EXPECT_THAT_ERROR(Mem2.takeError(), Succeeded());
char *WA = Mapper->prepare(Mem2->Start, HW.size() + 1);
- std::strcpy(static_cast<char *>(WA), HW.c_str());
+ std::strcpy(WA, HW.c_str());
MemoryMapper::AllocInfo Alloc3;
{
diff --git a/llvm/unittests/Frontend/CMakeLists.txt b/llvm/unittests/Frontend/CMakeLists.txt
index 281d509..6e4ba5d 100644
--- a/llvm/unittests/Frontend/CMakeLists.txt
+++ b/llvm/unittests/Frontend/CMakeLists.txt
@@ -11,6 +11,7 @@ set(LLVM_LINK_COMPONENTS
)
add_llvm_unittest(LLVMFrontendTests
+ HLSLBindingTest.cpp
HLSLRootSignatureDumpTest.cpp
HLSLRootSignatureRangesTest.cpp
OpenACCTest.cpp
diff --git a/llvm/unittests/Frontend/HLSLBindingTest.cpp b/llvm/unittests/Frontend/HLSLBindingTest.cpp
new file mode 100644
index 0000000..ca2f7b5
--- /dev/null
+++ b/llvm/unittests/Frontend/HLSLBindingTest.cpp
@@ -0,0 +1,275 @@
+//===------ HLSLBindingTest.cpp - Resource binding tests ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Frontend/HLSL/HLSLBinding.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/DXILABI.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+using namespace llvm::dxil;
+
+MATCHER_P(HasSpecificValue, Value, "") {
+ return arg.has_value() && *arg == Value;
+}
+
+static void
+checkExpectedSpaceAndFreeRanges(hlsl::BindingInfo::RegisterSpace &RegSpace,
+ uint32_t ExpSpace,
+ ArrayRef<uint32_t> ExpValues) {
+ EXPECT_EQ(RegSpace.Space, ExpSpace);
+ EXPECT_EQ(RegSpace.FreeRanges.size() * 2, ExpValues.size());
+ unsigned I = 0;
+ for (auto &R : RegSpace.FreeRanges) {
+ EXPECT_EQ(R.LowerBound, ExpValues[I]);
+ EXPECT_EQ(R.UpperBound, ExpValues[I + 1]);
+ I += 2;
+ }
+}
+
+TEST(HLSLBindingTest, TestTrivialCase) {
+ hlsl::BindingInfoBuilder Builder;
+
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ // check that UAV has exactly one gap
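+  // (each FreeRanges entry is an inclusive [LowerBound, UpperBound] pair, so
+  // {0u, 4u, 6u, ~0u} means registers 0-4 and 6-UINT32_MAX are free)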
+ hlsl::BindingInfo::BindingSpaces &UAVSpaces =
+ Info.getBindingSpaces(ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0, {0u, 4u, 6u, ~0u});
+
+ // check that other kinds of register spaces are all available
+ for (auto RC :
+ {ResourceClass::SRV, ResourceClass::CBuffer, ResourceClass::Sampler}) {
+ hlsl::BindingInfo::BindingSpaces &Spaces = Info.getBindingSpaces(RC);
+ EXPECT_EQ(Spaces.RC, RC);
+ EXPECT_EQ(Spaces.Spaces.size(), 0u);
+ }
+}
+
+TEST(HLSLBindingTest, TestManyBindings) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // cbuffer CB : register(b3) { int a; }
+ // RWBuffer<float4> A[5] : register(u10, space20);
+ // StructuredBuffer<int> B : register(t5);
+ // RWBuffer<float> C : register(u5);
+ // StructuredBuffer<int> D[5] : register(t0);
+ // RWBuffer<float> E[2] : register(u2);
+ // SamplerState S1 : register(s5, space2);
+ // SamplerState S2 : register(s4, space2);
+ Builder.trackBinding(ResourceClass::CBuffer, /*Space=*/0, /*LowerBound=*/3,
+ /*UpperBound=*/3, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/20, /*LowerBound=*/10,
+ /*UpperBound=*/14, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/0,
+ /*UpperBound=*/4, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/2,
+ /*UpperBound=*/3, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::Sampler, /*Space=*/2, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::Sampler, /*Space=*/2, /*LowerBound=*/4,
+ /*UpperBound=*/4, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &SRVSpaces =
+ Info.getBindingSpaces(ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
+ // verify that consecutive bindings are merged
+ // (SRVSpaces has only one free space range {6, ~0u}).
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {6u, ~0u});
+
+ hlsl::BindingInfo::BindingSpaces &UAVSpaces =
+ Info.getBindingSpaces(ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.Spaces.size(), 2u);
+ checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0,
+ {0u, 1u, 4u, 4u, 6u, ~0u});
+ checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[1], 20, {0u, 9u, 15u, ~0u});
+
+ hlsl::BindingInfo::BindingSpaces &CBufferSpaces =
+ Info.getBindingSpaces(ResourceClass::CBuffer);
+ EXPECT_EQ(CBufferSpaces.RC, ResourceClass::CBuffer);
+ EXPECT_EQ(CBufferSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(CBufferSpaces.Spaces[0], 0,
+ {0u, 2u, 4u, ~0u});
+
+ hlsl::BindingInfo::BindingSpaces &SamplerSpaces =
+ Info.getBindingSpaces(ResourceClass::Sampler);
+ EXPECT_EQ(SamplerSpaces.RC, ResourceClass::Sampler);
+ EXPECT_EQ(SamplerSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(SamplerSpaces.Spaces[0], 2,
+ {0u, 3u, 6u, ~0u});
+}
+
+TEST(HLSLBindingTest, TestUnboundedAndOverlap) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // StructuredBuffer<float> A[] : register(t5);
+ // StructuredBuffer<float> B[3] : register(t0);
+ // StructuredBuffer<float> C[] : register(t0, space2);
+ // StructuredBuffer<float> D : register(t4, space2); /* overlapping */
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/0,
+ /*UpperBound=*/2, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/2, /*LowerBound=*/0,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/2, /*LowerBound=*/4,
+ /*UpperBound=*/4, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_TRUE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &SRVSpaces =
+ Info.getBindingSpaces(ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.Spaces.size(), 2u);
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {3, 4});
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[1], 2, {});
+}
+
+TEST(HLSLBindingTest, TestExactOverlap) {
+ hlsl::BindingInfoBuilder Builder;
+
+  // Since the bindings overlap exactly, we need sigil values to differentiate
+ // them.
+ // Note: We initialize these to 0 to suppress a -Wuninitialized-const-pointer,
+ // but we really are just using the stack addresses here.
+ char ID1 = 0;
+ char ID2 = 0;
+
+ // StructuredBuffer<float> A : register(t5);
+ // StructuredBuffer<float> B : register(t5);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/&ID1);
+ Builder.trackBinding(ResourceClass::SRV, /*Space=*/0, /*LowerBound=*/5,
+ /*UpperBound=*/5, /*Cookie=*/&ID2);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_TRUE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &SRVSpaces =
+ Info.getBindingSpaces(ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
+ EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
+ checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {0u, 4u, 6u, ~0u});
+}
+
+TEST(HLSLBindingTest, TestEndOfRange) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // RWBuffer<float> A : register(u4294967295); /* UINT32_MAX */
+ // RWBuffer<float> B[10] : register(u4294967286, space1);
+  // /* range (UINT32_MAX - 9, UINT32_MAX) */
+ // RWBuffer<float> C[10] : register(u2147483647, space2);
+ // /* range (INT32_MAX, INT32_MAX + 9) */
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/~0u,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/1, /*LowerBound=*/~0u - 9u,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/2,
+ /*LowerBound=*/2147483647u,
+ /*UpperBound=*/2147483647u + 9u, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ hlsl::BindingInfo::BindingSpaces &UAVSpaces =
+ Info.getBindingSpaces(ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
+ EXPECT_EQ(UAVSpaces.Spaces.size(), 3u);
+ checkExpectedSpaceAndFreeRanges(
+ UAVSpaces.Spaces[0], 0, {0, std::numeric_limits<uint32_t>::max() - 1});
+ checkExpectedSpaceAndFreeRanges(
+ UAVSpaces.Spaces[1], 1, {0, std::numeric_limits<uint32_t>::max() - 10});
+ checkExpectedSpaceAndFreeRanges(
+ UAVSpaces.Spaces[2], 2,
+ {0, static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) - 1u,
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) + 10u,
+ std::numeric_limits<uint32_t>::max()});
+}
+
+TEST(HLSLBindingTest, TestFindAvailable) {
+ hlsl::BindingInfoBuilder Builder;
+
+ // RWBuffer<float> A : register(u5);
+ // RWBuffer<float> B : register(u5, space1);
+ // RWBuffer<float> C : register(u11, space1);
+ // RWBuffer<float> D[] : register(u1, space2);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/0, /*LowerBound=*/5u,
+ /*UpperBound=*/5u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/1, /*LowerBound=*/2u,
+ /*UpperBound=*/2u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/1, /*LowerBound=*/6u,
+ /*UpperBound=*/6u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/2, /*LowerBound=*/1u,
+ /*UpperBound=*/~0u, /*Cookie=*/nullptr);
+ Builder.trackBinding(ResourceClass::UAV, /*Space=*/3, /*LowerBound=*/~0u - 1,
+ /*UpperBound=*/~0u - 1, /*Cookie=*/nullptr);
+ bool HasOverlap;
+ hlsl::BindingInfo Info = Builder.calculateBindingInfo(HasOverlap);
+
+ EXPECT_FALSE(HasOverlap);
+
+ // In space 0, we find room for a small binding at the beginning and
+ // a large binding after `A`'s binding.
+ std::optional<uint32_t> V =
+ Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/0, /*Size=*/1);
+ EXPECT_THAT(V, HasSpecificValue(0u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/0, /*Size=*/100);
+ EXPECT_THAT(V, HasSpecificValue(6u));
+
+  // In space 1, we try to fit larger bindings into the gaps. Note that we do
+  // this largest to smallest and observe that the earlier gaps still exist.
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/4);
+ EXPECT_THAT(V, HasSpecificValue(7u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/3);
+ EXPECT_THAT(V, HasSpecificValue(3u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/2);
+ EXPECT_THAT(V, HasSpecificValue(0u));
+ // At this point, we've used all of the contiguous space up to 11u
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/1, /*Size=*/1);
+ EXPECT_THAT(V, HasSpecificValue(11u));
+
+  // Space 2 is mostly full; we can only fit into the room at the beginning.
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/2, /*Size=*/2);
+ EXPECT_FALSE(V.has_value());
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/2, /*Size=*/1);
+ EXPECT_THAT(V, HasSpecificValue(0u));
+
+  // Finding space for an unbounded array is a bit funnier: it always needs to
+  // go at the end of the available space.
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/3,
+ /*Size=*/~0u);
+ // Note that we end up with a size 1 array here, starting at ~0u.
+ EXPECT_THAT(V, HasSpecificValue(~0u));
+ V = Info.findAvailableBinding(ResourceClass::UAV, /*Space=*/4,
+ /*Size=*/~0u);
+ // In an empty space we find the slot at the beginning.
+ EXPECT_THAT(V, HasSpecificValue(0u));
+}
diff --git a/llvm/unittests/Support/VirtualFileSystemTest.cpp b/llvm/unittests/Support/VirtualFileSystemTest.cpp
index eb590e4..fc3ccea 100644
--- a/llvm/unittests/Support/VirtualFileSystemTest.cpp
+++ b/llvm/unittests/Support/VirtualFileSystemTest.cpp
@@ -225,7 +225,7 @@ std::string getPosixPath(const Twine &S) {
} // end anonymous namespace
TEST(VirtualFileSystemTest, StatusQueries) {
- IntrusiveRefCntPtr<DummyFileSystem> D(new DummyFileSystem());
+ auto D = makeIntrusiveRefCnt<DummyFileSystem>();
ErrorOr<vfs::Status> Status((std::error_code()));
D->addRegularFile("/foo");
@@ -265,11 +265,11 @@ TEST(VirtualFileSystemTest, StatusQueries) {
}
TEST(VirtualFileSystemTest, BaseOnlyOverlay) {
- IntrusiveRefCntPtr<DummyFileSystem> D(new DummyFileSystem());
+ auto D = makeIntrusiveRefCnt<DummyFileSystem>();
ErrorOr<vfs::Status> Status((std::error_code()));
EXPECT_FALSE(Status = D->status("/foo"));
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(new vfs::OverlayFileSystem(D));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(D);
EXPECT_FALSE(Status = O->status("/foo"));
D->addRegularFile("/foo");
@@ -283,13 +283,12 @@ TEST(VirtualFileSystemTest, BaseOnlyOverlay) {
}
TEST(VirtualFileSystemTest, GetRealPathInOverlay) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("/foo");
Lower->addSymlink("/lower_link");
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
// Regular file.
@@ -312,11 +311,10 @@ TEST(VirtualFileSystemTest, GetRealPathInOverlay) {
}
TEST(VirtualFileSystemTest, OverlayFiles) {
- IntrusiveRefCntPtr<DummyFileSystem> Base(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Top(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Base));
+ auto Base = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Top = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Base);
O->pushOverlay(Middle);
O->pushOverlay(Top);
@@ -351,10 +349,9 @@ TEST(VirtualFileSystemTest, OverlayFiles) {
}
TEST(VirtualFileSystemTest, OverlayDirsNonMerged) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
Lower->addDirectory("/lower-only");
@@ -376,10 +373,9 @@ TEST(VirtualFileSystemTest, OverlayDirsNonMerged) {
TEST(VirtualFileSystemTest, MergedDirPermissions) {
// merged directories get the permissions of the upper dir
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
ErrorOr<vfs::Status> Status((std::error_code()));
@@ -401,12 +397,11 @@ TEST(VirtualFileSystemTest, MergedDirPermissions) {
}
TEST(VirtualFileSystemTest, OverlayIterator) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("/foo");
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
ErrorOr<vfs::Status> Status((std::error_code()));
@@ -784,10 +779,9 @@ static void checkContents(DirIter I, ArrayRef<StringRef> ExpectedOut) {
}
TEST(VirtualFileSystemTest, OverlayIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
std::error_code EC;
@@ -808,11 +802,10 @@ TEST(VirtualFileSystemTest, OverlayIteration) {
}
TEST(VirtualFileSystemTest, OverlayRecursiveIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Middle);
O->pushOverlay(Upper);
@@ -850,11 +843,10 @@ TEST(VirtualFileSystemTest, OverlayRecursiveIteration) {
}
TEST(VirtualFileSystemTest, ThreeLevelIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Middle);
O->pushOverlay(Upper);
@@ -870,11 +862,10 @@ TEST(VirtualFileSystemTest, ThreeLevelIteration) {
}
TEST(VirtualFileSystemTest, HiddenInIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Middle);
O->pushOverlay(Upper);
@@ -913,11 +904,10 @@ TEST(VirtualFileSystemTest, HiddenInIteration) {
}
TEST(VirtualFileSystemTest, Visit) {
- IntrusiveRefCntPtr<DummyFileSystem> Base(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Middle(new DummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Top(new DummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Base));
+ auto Base = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Middle = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto Top = makeIntrusiveRefCnt<DummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Base);
O->pushOverlay(Middle);
O->pushOverlay(Top);
@@ -984,10 +974,9 @@ TEST(OverlayFileSystemTest, PrintOutput) {
}
TEST(OverlayFileSystemTest, Exists) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new NoStatusDummyFileSystem());
- IntrusiveRefCntPtr<DummyFileSystem> Upper(new NoStatusDummyFileSystem());
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto Lower = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
+ auto Upper = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(Upper);
Lower->addDirectory("/both");
@@ -1008,8 +997,7 @@ TEST(OverlayFileSystemTest, Exists) {
}
TEST(ProxyFileSystemTest, Basic) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> Base(
- new vfs::InMemoryFileSystem());
+ auto Base = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
vfs::ProxyFileSystem PFS(Base);
Base->addFile("/a", 0, MemoryBuffer::getMemBuffer("test"));
@@ -1606,7 +1594,7 @@ TEST_F(VFSFromYAMLTest, BasicVFSFromYAML) {
}
TEST_F(VFSFromYAMLTest, MappedFiles) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo/bar");
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString(
@@ -1642,8 +1630,7 @@ TEST_F(VFSFromYAMLTest, MappedFiles) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
// file
@@ -1720,7 +1707,7 @@ TEST_F(VFSFromYAMLTest, MappedFiles) {
}
TEST_F(VFSFromYAMLTest, MappedRoot) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo/bar");
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS =
@@ -1735,8 +1722,7 @@ TEST_F(VFSFromYAMLTest, MappedRoot) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
// file
@@ -1762,7 +1748,7 @@ TEST_F(VFSFromYAMLTest, MappedRoot) {
}
TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlay) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
Lower->addDirectory("//root/bar");
@@ -1783,8 +1769,7 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlay) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
ErrorOr<vfs::Status> S = O->status("//root/foo");
@@ -1806,7 +1791,7 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlay) {
}
TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoExternalNames) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
Lower->addDirectory("//root/bar");
@@ -1847,7 +1832,7 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoExternalNames) {
}
TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoFallthrough) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
Lower->addDirectory("//root/bar");
@@ -1887,13 +1872,12 @@ TEST_F(VFSFromYAMLTest, RemappedDirectoryOverlayNoFallthrough) {
}
TEST_F(VFSFromYAMLTest, ReturnsRequestedPathVFSMiss) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS(
- new vfs::InMemoryFileSystem);
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/foo/a", 0,
MemoryBuffer::getMemBuffer("contents of a"));
ASSERT_FALSE(BaseFS->setCurrentWorkingDirectory("//root/foo"));
auto RemappedFS = vfs::RedirectingFileSystem::create(
- {}, /*UseExternalNames=*/false, *BaseFS);
+ {}, /*UseExternalNames=*/false, BaseFS);
auto OpenedF = RemappedFS->openFileForRead("a");
ASSERT_FALSE(OpenedF.getError());
@@ -1915,8 +1899,7 @@ TEST_F(VFSFromYAMLTest, ReturnsRequestedPathVFSMiss) {
}
TEST_F(VFSFromYAMLTest, ReturnsExternalPathVFSHit) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS(
- new vfs::InMemoryFileSystem);
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/foo/realname", 0,
MemoryBuffer::getMemBuffer("contents of a"));
auto FS =
@@ -1955,7 +1938,7 @@ TEST_F(VFSFromYAMLTest, ReturnsExternalPathVFSHit) {
}
TEST_F(VFSFromYAMLTest, RootRelativeTest) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/foo/bar");
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS =
@@ -1996,7 +1979,7 @@ TEST_F(VFSFromYAMLTest, RootRelativeTest) {
ASSERT_FALSE(S.getError());
EXPECT_EQ("//root/foo/bar/a", S->getName());
#else
- IntrusiveRefCntPtr<DummyFileSystem> LowerWindows(new DummyFileSystem());
+ auto LowerWindows = makeIntrusiveRefCnt<DummyFileSystem>();
LowerWindows->addDirectory("\\\\root\\foo\\bar");
LowerWindows->addRegularFile("\\\\root\\foo\\bar\\a");
FS = getFromYAMLString("{\n"
@@ -2018,8 +2001,7 @@ TEST_F(VFSFromYAMLTest, RootRelativeTest) {
}
TEST_F(VFSFromYAMLTest, ReturnsInternalPathVFSHit) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS(
- new vfs::InMemoryFileSystem);
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/foo/realname", 0,
MemoryBuffer::getMemBuffer("contents of a"));
auto FS =
@@ -2058,7 +2040,7 @@ TEST_F(VFSFromYAMLTest, ReturnsInternalPathVFSHit) {
}
TEST_F(VFSFromYAMLTest, CaseInsensitive) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString(
"{ 'case-sensitive': 'false',\n"
@@ -2076,8 +2058,7 @@ TEST_F(VFSFromYAMLTest, CaseInsensitive) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
ErrorOr<vfs::Status> S = O->status("//root/XX");
@@ -2094,7 +2075,7 @@ TEST_F(VFSFromYAMLTest, CaseInsensitive) {
}
TEST_F(VFSFromYAMLTest, CaseSensitive) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/foo/bar/a");
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString(
"{ 'case-sensitive': 'true',\n"
@@ -2112,8 +2093,7 @@ TEST_F(VFSFromYAMLTest, CaseSensitive) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
ErrorOr<vfs::Status> SS = O->status("//root/xx");
@@ -2126,7 +2106,7 @@ TEST_F(VFSFromYAMLTest, CaseSensitive) {
}
TEST_F(VFSFromYAMLTest, IllegalVFSFile) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
// invalid YAML at top-level
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLString("{]", Lower);
@@ -2252,7 +2232,7 @@ TEST_F(VFSFromYAMLTest, IllegalVFSFile) {
}
TEST_F(VFSFromYAMLTest, UseExternalName) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/external/file");
IntrusiveRefCntPtr<vfs::FileSystem> FS =
@@ -2304,7 +2284,7 @@ TEST_F(VFSFromYAMLTest, UseExternalName) {
}
TEST_F(VFSFromYAMLTest, MultiComponentPath) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/other");
// file in roots
@@ -2350,7 +2330,7 @@ TEST_F(VFSFromYAMLTest, MultiComponentPath) {
}
TEST_F(VFSFromYAMLTest, TrailingSlashes) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addRegularFile("//root/other");
// file in roots
@@ -2369,7 +2349,7 @@ TEST_F(VFSFromYAMLTest, TrailingSlashes) {
}
TEST_F(VFSFromYAMLTest, DirectoryIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addDirectory("//root/foo/bar");
@@ -2399,8 +2379,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIteration) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
std::error_code EC;
@@ -2416,7 +2395,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationSameDirMultipleEntries) {
if (!supportsSameDirMultipleYAMLEntries())
GTEST_SKIP();
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/zab");
Lower->addDirectory("//root/baz");
Lower->addRegularFile("//root/zab/a");
@@ -2449,8 +2428,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationSameDirMultipleEntries) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
std::error_code EC;
@@ -2461,7 +2439,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationSameDirMultipleEntries) {
TEST_F(VFSFromYAMLTest, RecursiveDirectoryIterationLevel) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/a");
Lower->addDirectory("//root/a/b");
Lower->addDirectory("//root/a/b/c");
@@ -2484,8 +2462,7 @@ TEST_F(VFSFromYAMLTest, RecursiveDirectoryIterationLevel) {
Lower);
ASSERT_NE(FS.get(), nullptr);
- IntrusiveRefCntPtr<vfs::OverlayFileSystem> O(
- new vfs::OverlayFileSystem(Lower));
+ auto O = makeIntrusiveRefCnt<vfs::OverlayFileSystem>(Lower);
O->pushOverlay(FS);
std::error_code EC;
@@ -2503,7 +2480,7 @@ TEST_F(VFSFromYAMLTest, RecursiveDirectoryIterationLevel) {
}
TEST_F(VFSFromYAMLTest, RelativePaths) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
std::error_code EC;
SmallString<128> CWD;
EC = llvm::sys::fs::current_path(CWD);
@@ -2557,7 +2534,7 @@ TEST_F(VFSFromYAMLTest, RelativePaths) {
}
TEST_F(VFSFromYAMLTest, NonFallthroughDirectoryIteration) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addRegularFile("//root/a");
Lower->addRegularFile("//root/b");
@@ -2586,7 +2563,7 @@ TEST_F(VFSFromYAMLTest, NonFallthroughDirectoryIteration) {
}
TEST_F(VFSFromYAMLTest, DirectoryIterationWithDuplicates) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addRegularFile("//root/a");
Lower->addRegularFile("//root/b");
@@ -2614,7 +2591,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationWithDuplicates) {
}
TEST_F(VFSFromYAMLTest, DirectoryIterationErrorInVFSLayer) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2643,7 +2620,7 @@ TEST_F(VFSFromYAMLTest, DirectoryIterationErrorInVFSLayer) {
}
TEST_F(VFSFromYAMLTest, GetRealPath) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//dir/");
Lower->addRegularFile("/foo");
Lower->addSymlink("/link");
@@ -2695,7 +2672,7 @@ TEST_F(VFSFromYAMLTest, GetRealPath) {
}
TEST_F(VFSFromYAMLTest, WorkingDirectory) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2753,7 +2730,7 @@ TEST_F(VFSFromYAMLTest, WorkingDirectory) {
}
TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthrough) {
- IntrusiveRefCntPtr<DummyFileSystem> Lower(new DummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<DummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2835,7 +2812,7 @@ TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthrough) {
}
TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthroughInvalid) {
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2872,7 +2849,7 @@ TEST_F(VFSFromYAMLTest, WorkingDirectoryFallthroughInvalid) {
}
TEST_F(VFSFromYAMLTest, VirtualWorkingDirectory) {
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/foo");
Lower->addRegularFile("//root/foo/a");
@@ -2928,7 +2905,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTest) {
raw_string_ostream OS(Buffer);
VFSWriter.write(OS);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/");
Lower->addDirectory("//root/a");
Lower->addRegularFile("//root/a/b");
@@ -2978,7 +2955,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTest2) {
raw_string_ostream OS(Buffer);
VFSWriter.write(OS);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLRawString(Buffer, Lower);
EXPECT_NE(FS.get(), nullptr);
}
@@ -3010,7 +2987,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTest3) {
raw_string_ostream OS(Buffer);
VFSWriter.write(OS);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
IntrusiveRefCntPtr<vfs::FileSystem> FS = getFromYAMLRawString(Buffer, Lower);
EXPECT_NE(FS.get(), nullptr);
}
@@ -3033,7 +3010,7 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTestHandleDirs) {
// We didn't add a single file - only directories.
EXPECT_EQ(Buffer.find("'type': 'file'"), std::string::npos);
- IntrusiveRefCntPtr<ErrorDummyFileSystem> Lower(new ErrorDummyFileSystem());
+ auto Lower = makeIntrusiveRefCnt<ErrorDummyFileSystem>();
Lower->addDirectory("//root/a");
Lower->addDirectory("//root/b");
Lower->addDirectory("//root/c");
@@ -3051,17 +3028,17 @@ TEST_F(VFSFromYAMLTest, YAMLVFSWriterTestHandleDirs) {
}
TEST_F(VFSFromYAMLTest, RedirectingWith) {
- IntrusiveRefCntPtr<DummyFileSystem> Both(new DummyFileSystem());
+ auto Both = makeIntrusiveRefCnt<DummyFileSystem>();
Both->addDirectory("//root/a");
Both->addRegularFile("//root/a/f");
Both->addDirectory("//root/b");
Both->addRegularFile("//root/b/f");
- IntrusiveRefCntPtr<DummyFileSystem> AOnly(new DummyFileSystem());
+ auto AOnly = makeIntrusiveRefCnt<DummyFileSystem>();
AOnly->addDirectory("//root/a");
AOnly->addRegularFile("//root/a/f");
- IntrusiveRefCntPtr<DummyFileSystem> BOnly(new DummyFileSystem());
+ auto BOnly = makeIntrusiveRefCnt<DummyFileSystem>();
BOnly->addDirectory("//root/b");
BOnly->addRegularFile("//root/b/f");
@@ -3166,8 +3143,7 @@ TEST_F(VFSFromYAMLTest, RedirectingWith) {
}
TEST(VFSFromRemappedFilesTest, Basic) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS =
- new vfs::InMemoryFileSystem;
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/b", 0, MemoryBuffer::getMemBuffer("contents of b"));
BaseFS->addFile("//root/c", 0, MemoryBuffer::getMemBuffer("contents of c"));
@@ -3176,7 +3152,7 @@ TEST(VFSFromRemappedFilesTest, Basic) {
{"//root/a/b/c", "//root/c"},
};
auto RemappedFS = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/false, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/false, BaseFS);
auto StatA = RemappedFS->status("//root/a/a");
auto StatB = RemappedFS->status("//root/a/b/c");
@@ -3194,8 +3170,7 @@ TEST(VFSFromRemappedFilesTest, Basic) {
}
TEST(VFSFromRemappedFilesTest, UseExternalNames) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS =
- new vfs::InMemoryFileSystem;
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/b", 0, MemoryBuffer::getMemBuffer("contents of b"));
BaseFS->addFile("//root/c", 0, MemoryBuffer::getMemBuffer("contents of c"));
@@ -3204,7 +3179,7 @@ TEST(VFSFromRemappedFilesTest, UseExternalNames) {
{"//root/a/b/c", "//root/c"},
};
auto RemappedFS = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/true, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/true, BaseFS);
auto StatA = RemappedFS->status("//root/a/a");
auto StatB = RemappedFS->status("//root/a/b/c");
@@ -3222,8 +3197,7 @@ TEST(VFSFromRemappedFilesTest, UseExternalNames) {
}
TEST(VFSFromRemappedFilesTest, LastMappingWins) {
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> BaseFS =
- new vfs::InMemoryFileSystem;
+ auto BaseFS = makeIntrusiveRefCnt<vfs::InMemoryFileSystem>();
BaseFS->addFile("//root/b", 0, MemoryBuffer::getMemBuffer("contents of b"));
BaseFS->addFile("//root/c", 0, MemoryBuffer::getMemBuffer("contents of c"));
@@ -3232,9 +3206,9 @@ TEST(VFSFromRemappedFilesTest, LastMappingWins) {
{"//root/a", "//root/c"},
};
auto RemappedFSKeepName = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/false, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/false, BaseFS);
auto RemappedFSExternalName = vfs::RedirectingFileSystem::create(
- RemappedFiles, /*UseExternalNames=*/true, *BaseFS);
+ RemappedFiles, /*UseExternalNames=*/true, BaseFS);
auto StatKeepA = RemappedFSKeepName->status("//root/a");
auto StatExternalA = RemappedFSExternalName->status("//root/a");
@@ -3416,7 +3390,7 @@ TEST(RedirectingFileSystemTest, ExternalPaths) {
BaseFS->setCurrentWorkingDirectory("/cwd");
auto CheckFS = makeIntrusiveRefCnt<InterceptorFS>(BaseFS);
auto FS = vfs::RedirectingFileSystem::create({}, /*UseExternalNames=*/false,
- *CheckFS);
+ CheckFS);
FS->status("/a/../b");
FS->openFileForRead("c");
@@ -3442,7 +3416,7 @@ TEST(RedirectingFileSystemTest, ExternalPaths) {
}
TEST(RedirectingFileSystemTest, Exists) {
- IntrusiveRefCntPtr<DummyFileSystem> Dummy(new NoStatusDummyFileSystem());
+ auto Dummy = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
auto YAML =
MemoryBuffer::getMemBuffer("{\n"
" 'version': 0,\n"
@@ -3513,7 +3487,7 @@ TEST(RedirectingFileSystemTest, Exists) {
}
TEST(RedirectingFileSystemTest, ExistsFallback) {
- IntrusiveRefCntPtr<DummyFileSystem> Dummy(new NoStatusDummyFileSystem());
+ auto Dummy = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
auto YAML =
MemoryBuffer::getMemBuffer("{\n"
" 'version': 0,\n"
@@ -3537,7 +3511,7 @@ TEST(RedirectingFileSystemTest, ExistsFallback) {
}
TEST(RedirectingFileSystemTest, ExistsRedirectOnly) {
- IntrusiveRefCntPtr<DummyFileSystem> Dummy(new NoStatusDummyFileSystem());
+ auto Dummy = makeIntrusiveRefCnt<NoStatusDummyFileSystem>();
auto YAML =
MemoryBuffer::getMemBuffer("{\n"
" 'version': 0,\n"
diff --git a/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp b/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp
index 6cd1a48..d4715be 100644
--- a/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp
+++ b/llvm/unittests/Target/DirectX/ResourceBindingAnalysisTests.cpp
@@ -44,19 +44,6 @@ protected:
delete MAM;
delete Context;
}
-
- void checkExpectedSpaceAndFreeRanges(
- DXILResourceBindingInfo::RegisterSpace &RegSpace, uint32_t ExpSpace,
- ArrayRef<uint32_t> ExpValues) {
- EXPECT_EQ(RegSpace.Space, ExpSpace);
- EXPECT_EQ(RegSpace.FreeRanges.size() * 2, ExpValues.size());
- unsigned I = 0;
- for (auto &R : RegSpace.FreeRanges) {
- EXPECT_EQ(R.LowerBound, ExpValues[I]);
- EXPECT_EQ(R.UpperBound, ExpValues[I + 1]);
- I += 2;
- }
- }
};
TEST_F(ResourceBindingAnalysisTest, TestTrivialCase) {
@@ -76,103 +63,16 @@ entry:
EXPECT_EQ(false, DRBI.hasImplicitBinding());
EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- // check that UAV has exactly one gap
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0,
- {0, 4, 6, UINT32_MAX});
-
- // check that other kinds of register spaces are all available
- for (auto RC :
- {ResourceClass::SRV, ResourceClass::CBuffer, ResourceClass::Sampler}) {
- DXILResourceBindingInfo::BindingSpaces &Spaces = DRBI.getBindingSpaces(RC);
- EXPECT_EQ(Spaces.RC, RC);
- EXPECT_EQ(Spaces.Spaces.size(), 0u);
- }
-}
-
-TEST_F(ResourceBindingAnalysisTest, TestManyBindings) {
- // cbuffer CB : register(b3) { int a; }
- // RWBuffer<float4> A[5] : register(u10, space20);
- // StructuredBuffer<int> B : register(t5);
- // RWBuffer<float> C : register(u5);
- // StructuredBuffer<int> D[5] : register(t0);
- // RWBuffer<float> E[2] : register(u2);
- // SamplerState S1 : register(s5, space2);
- // SamplerState S2 : register(s4, space2);
- StringRef Assembly = R"(
-%__cblayout_CB = type <{ i32 }>
-define void @main() {
-entry:
- %handleCB = call target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) @llvm.dx.resource.handlefrombinding(i32 0, i32 3, i32 1, i32 0, i1 false, ptr null)
- %handleA = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 20, i32 10, i32 5, i32 0, i1 false, ptr null)
- %handleB = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 5, i32 1, i32 0, i1 false, ptr null)
- %handleC = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 5, i32 1, i32 0, i1 false, ptr null)
- %handleD = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 5, i32 4, i1 false, ptr null)
- %handleE = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 2, i32 2, i32 0, i1 false, ptr null)
- %handleS1 = call target("dx.Sampler", 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 5, i32 1, i32 0, i1 false, ptr null)
- %handleS2 = call target("dx.Sampler", 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 4, i32 1, i32 0, i1 false, ptr null)
- ; duplicate binding for the same resource
- %handleD2 = call target("dx.RawBuffer", i32, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 5, i32 4, i1 false, ptr null)
- ret void
-}
- )";
-
- auto M = parseAsm(Assembly);
-
- DXILResourceBindingInfo &DRBI =
- MAM->getResult<DXILResourceBindingAnalysis>(*M);
-
- EXPECT_EQ(false, DRBI.hasImplicitBinding());
- EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &SRVSpaces =
- DRBI.getBindingSpaces(ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
- // verify that consecutive bindings are merged
- // (SRVSpaces has only one free space range {6, UINT32_MAX}).
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {6, UINT32_MAX});
-
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 2u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0,
- {0, 1, 4, 4, 6, UINT32_MAX});
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[1], 20,
- {0, 9, 15, UINT32_MAX});
-
- DXILResourceBindingInfo::BindingSpaces &CBufferSpaces =
- DRBI.getBindingSpaces(ResourceClass::CBuffer);
- EXPECT_EQ(CBufferSpaces.RC, ResourceClass::CBuffer);
- EXPECT_EQ(CBufferSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(CBufferSpaces.Spaces[0], 0,
- {0, 2, 4, UINT32_MAX});
-
- DXILResourceBindingInfo::BindingSpaces &SamplerSpaces =
- DRBI.getBindingSpaces(ResourceClass::Sampler);
- EXPECT_EQ(SamplerSpaces.RC, ResourceClass::Sampler);
- EXPECT_EQ(SamplerSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(SamplerSpaces.Spaces[0], 2,
- {0, 3, 6, UINT32_MAX});
}
-TEST_F(ResourceBindingAnalysisTest, TestUnboundedAndOverlap) {
- // StructuredBuffer<float> A[] : register(t5);
- // StructuredBuffer<float> B[3] : register(t0);
- // StructuredBuffer<float> C[] : register(t0, space2);
- // StructuredBuffer<float> D : register(t4, space2); /* overlapping */
+TEST_F(ResourceBindingAnalysisTest, TestOverlap) {
+ // StructuredBuffer<float> A[] : register(t0, space2);
+ // StructuredBuffer<float> B : register(t4, space2); /* overlapping */
StringRef Assembly = R"(
define void @main() {
entry:
- %handleA = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 5, i32 -1, i32 10, i1 false, ptr null)
- %handleB = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 3, i32 0, i1 false, ptr null)
- %handleC = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 0, i32 -1, i32 100, i1 false, ptr null)
- %handleD = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 4, i32 1, i32 0, i1 false, ptr null)
+ %handleA = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 0, i32 -1, i32 100, i1 false, ptr null)
+ %handleB = call target("dx.RawBuffer", float, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 4, i32 1, i32 0, i1 false, ptr null)
ret void
}
)";
@@ -184,13 +84,6 @@ entry:
EXPECT_EQ(false, DRBI.hasImplicitBinding());
EXPECT_EQ(true, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &SRVSpaces =
- DRBI.getBindingSpaces(ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.Spaces.size(), 2u);
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0, {3, 4});
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[1], 2, {});
}
TEST_F(ResourceBindingAnalysisTest, TestExactOverlap) {
@@ -214,49 +107,6 @@ entry:
EXPECT_EQ(false, DRBI.hasImplicitBinding());
EXPECT_EQ(true, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &SRVSpaces =
- DRBI.getBindingSpaces(ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.RC, ResourceClass::SRV);
- EXPECT_EQ(SRVSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(SRVSpaces.Spaces[0], 0,
- {0, 4, 6, UINT32_MAX});
-}
-
-TEST_F(ResourceBindingAnalysisTest, TestEndOfRange) {
- // RWBuffer<float> A : register(u4294967295); /* UINT32_MAX */
- // RWBuffer<float> B[10] : register(u4294967286, space1);
- // /* range (UINT32_MAX - 9, UINT32_MAX )*/
- // RWBuffer<float> C[10] : register(u2147483647, space2);
- // /* range (INT32_MAX, INT32_MAX + 9) */
- StringRef Assembly = R"(
-%__cblayout_CB = type <{ i32 }>
-define void @main() {
-entry:
- %handleA = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 0, i32 -1, i32 1, i32 0, i1 false, ptr null)
- %handleB = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 1, i32 -10, i32 10, i32 50, i1 false, ptr null)
- %handleC = call target("dx.TypedBuffer", float, 1, 0, 0) @llvm.dx.resource.handlefrombinding(i32 2, i32 2147483647, i32 10, i32 100, i1 false, ptr null)
- ret void
-}
- )";
-
- auto M = parseAsm(Assembly);
-
- DXILResourceBindingInfo &DRBI =
- MAM->getResult<DXILResourceBindingAnalysis>(*M);
-
- EXPECT_EQ(false, DRBI.hasImplicitBinding());
- EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 3u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 0, {0, UINT32_MAX - 1});
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[1], 1, {0, UINT32_MAX - 10});
- checkExpectedSpaceAndFreeRanges(
- UAVSpaces.Spaces[2], 2,
- {0, (uint32_t)INT32_MAX - 1, (uint32_t)INT32_MAX + 10, UINT32_MAX});
}
TEST_F(ResourceBindingAnalysisTest, TestImplicitFlag) {
@@ -275,15 +125,8 @@ entry:
DXILResourceBindingInfo &DRBI =
MAM->getResult<DXILResourceBindingAnalysis>(*M);
- EXPECT_EQ(true, DRBI.hasImplicitBinding());
- EXPECT_EQ(false, DRBI.hasOverlappingBinding());
-
- DXILResourceBindingInfo::BindingSpaces &UAVSpaces =
- DRBI.getBindingSpaces(ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.RC, ResourceClass::UAV);
- EXPECT_EQ(UAVSpaces.Spaces.size(), 1u);
- checkExpectedSpaceAndFreeRanges(UAVSpaces.Spaces[0], 100,
- {0, 4, 6, UINT32_MAX});
+ EXPECT_TRUE(DRBI.hasImplicitBinding());
+ EXPECT_FALSE(DRBI.hasOverlappingBinding());
}
} // namespace
diff --git a/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp b/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp
index 6214ea3..b698b28 100644
--- a/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp
+++ b/llvm/unittests/Transforms/Vectorize/VPlanVerifierTest.cpp
@@ -287,6 +287,44 @@ TEST_F(VPVerifierTest, BlockOutsideRegionWithParent) {
#endif
}
+TEST_F(VPVerifierTest, NonHeaderPHIInHeader) {
+ VPlan &Plan = getPlan();
+ VPValue *Zero = Plan.getOrAddLiveIn(ConstantInt::get(Type::getInt32Ty(C), 0));
+ auto *CanIV = new VPCanonicalIVPHIRecipe(Zero, {});
+ auto *BranchOnCond = new VPInstruction(VPInstruction::BranchOnCond, {CanIV});
+
+ VPBasicBlock *VPBB1 = Plan.getEntry();
+ VPBasicBlock *VPBB2 = Plan.createVPBasicBlock("header");
+
+ VPBB2->appendRecipe(CanIV);
+
+ PHINode *PHINode = PHINode::Create(Type::getInt32Ty(C), 2);
+ auto *IRPhi = new VPIRPhi(*PHINode);
+ VPBB2->appendRecipe(IRPhi);
+ VPBB2->appendRecipe(BranchOnCond);
+
+ VPRegionBlock *R1 = Plan.createVPRegionBlock(VPBB2, VPBB2, "R1");
+ VPBlockUtils::connectBlocks(VPBB1, R1);
+ VPBlockUtils::connectBlocks(R1, Plan.getScalarHeader());
+
+#if GTEST_HAS_STREAM_REDIRECTION
+ ::testing::internal::CaptureStderr();
+#endif
+ EXPECT_FALSE(verifyVPlanIsValid(Plan));
+#if GTEST_HAS_STREAM_REDIRECTION
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ EXPECT_STREQ(
+ "Found non-header PHI recipe in header VPBB: IR <badref> = phi i32 \n",
+ ::testing::internal::GetCapturedStderr().c_str());
+#else
+ EXPECT_STREQ("Found non-header PHI recipe in header VPBB",
+ ::testing::internal::GetCapturedStderr().c_str());
+#endif
+#endif
+
+ delete PHINode;
+}
+
class VPIRVerifierTest : public VPlanTestIRBase {};
TEST_F(VPIRVerifierTest, testVerifyIRPhi) {
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
index 218e36e..e3182b0 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
@@ -46,6 +46,7 @@ static_library("bugprone") {
"IncorrectRoundingsCheck.cpp",
"InfiniteLoopCheck.cpp",
"IntegerDivisionCheck.cpp",
+ "InvalidEnumDefaultInitializationCheck.cpp",
"LambdaFunctionNameCheck.cpp",
"MacroParenthesesCheck.cpp",
"MacroRepeatedSideEffectsCheck.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
index 3d08c3f..d394923 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
@@ -8,6 +8,7 @@ static_library("Analysis") {
"//llvm/include/llvm/Config:config",
"//llvm/lib/BinaryFormat",
"//llvm/lib/IR",
+ "//llvm/lib/Frontend/HLSL",
"//llvm/lib/ProfileData",
"//llvm/lib/Support",
"//llvm/lib/TargetParser",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn
index 4c1c613..fce564e 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Frontend/HLSL/BUILD.gn
@@ -6,6 +6,7 @@ static_library("HLSL") {
]
sources = [
"CBuffer.cpp",
+ "HLSLBinding.cpp",
"HLSLResource.cpp",
"HLSLRootSignature.cpp",
"RootSignatureMetadata.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn b/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn
index c29277c..6890c48 100644
--- a/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/unittests/Frontend/BUILD.gn
@@ -13,6 +13,7 @@ unittest("LLVMFrontendTests") {
"//llvm/lib/Testing/Support",
]
sources = [
+ "HLSLBindingTest.cpp",
"HLSLRootSignatureDumpTest.cpp",
"HLSLRootSignatureRangesTest.cpp",
"OpenACCTest.cpp",
diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index 1bd5ba8..7290977 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -247,6 +247,9 @@ class Test:
# and will be honored when the test result is supplied.
self.xfails = []
+ # Exclude this test if it is expected to fail (XFAIL).
+ self.exclude_xfail = False
+
# If true, ignore all items in self.xfails.
self.xfail_not = False
diff --git a/llvm/utils/lit/lit/TestRunner.py b/llvm/utils/lit/lit/TestRunner.py
index 73db67a..e7cd707 100644
--- a/llvm/utils/lit/lit/TestRunner.py
+++ b/llvm/utils/lit/lit/TestRunner.py
@@ -2175,6 +2175,8 @@ def parseIntegratedTestScript(test, additional_parsers=[], require_script=True):
assert parsed["DEFINE:"] == script
assert parsed["REDEFINE:"] == script
test.xfails += parsed["XFAIL:"] or []
+ if test.exclude_xfail and test.isExpectedToFail():
+ return lit.Test.Result(Test.EXCLUDED, "excluding XFAIL tests")
test.requires += parsed["REQUIRES:"] or []
test.unsupported += parsed["UNSUPPORTED:"] or []
if parsed["ALLOW_RETRIES:"]:
diff --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py
index 3292554..e889515 100644
--- a/llvm/utils/lit/lit/cl_arguments.py
+++ b/llvm/utils/lit/lit/cl_arguments.py
@@ -304,6 +304,16 @@ def parse_args():
default=os.environ.get("LIT_XFAIL_NOT", ""),
)
selection_group.add_argument(
+ "--exclude-xfail",
+ help="exclude XFAIL tests (unless they are in the --xfail-not list). "
+ "Note: This option is implemented in "
+ "lit.TestRunner.parseIntegratedTestScript and so will have no effect on "
+ "test formats that do not call that and do not implement the option "
+ "separately.",
+ default=False,
+ action="store_true",
+ )
+ selection_group.add_argument(
"--num-shards",
dest="numShards",
metavar="M",
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
index 0939838..9650a0e 100755
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -240,6 +240,8 @@ def mark_xfail(selected_tests, opts):
t.xfails += "*"
if test_file in opts.xfail_not or test_full_name in opts.xfail_not:
t.xfail_not = True
+ if opts.exclude_xfail:
+ t.exclude_xfail = True
def mark_excluded(discovered_tests, selected_tests):
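With the three pieces above in place, a test whose XFAIL annotation applies to the current configuration is reported as EXCLUDED instead of being run: main.py propagates the flag to each selected test, Test.py stores it, and parseIntegratedTestScript returns an EXCLUDED result once the XFAIL lines have been parsed. For example, an invocation along the lines of `llvm-lit --exclude-xfail --xfail-not 'true-xfail.txt' <suite>` would skip every expected-to-fail test except true-xfail.txt, which is exactly the behavior checked by the CHECK-EXCLUDED-OVERRIDE lines in the updated xfail-cl.py test below. Note that a test XFAIL'ed on a feature the configuration does not have (the new true-xfail-conditionally.txt) is not actually expected to fail, so it still runs and passes.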
diff --git a/llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt b/llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt
new file mode 100644
index 0000000..6fdecd6
--- /dev/null
+++ b/llvm/utils/lit/tests/Inputs/xfail-cl/true-xfail-conditionally.txt
@@ -0,0 +1,2 @@
+# XFAIL: this-does-not-exist
+# RUN: true
\ No newline at end of file
diff --git a/llvm/utils/lit/tests/xfail-cl.py b/llvm/utils/lit/tests/xfail-cl.py
index ef1bb04..f1e0e33 100644
--- a/llvm/utils/lit/tests/xfail-cl.py
+++ b/llvm/utils/lit/tests/xfail-cl.py
@@ -5,6 +5,18 @@
# RUN: %{inputs}/xfail-cl \
# RUN: | FileCheck --check-prefix=CHECK-FILTER %s
+# RUN: %{lit} --xfail 'false.txt;false2.txt;top-level-suite :: b :: test.txt' \
+# RUN: --exclude-xfail \
+# RUN: %{inputs}/xfail-cl \
+# RUN: | FileCheck --check-prefixes=CHECK-EXCLUDED,CHECK-EXCLUDED-NOOVERRIDE %s
+
+# RUN: %{lit} --xfail 'false.txt;false2.txt;top-level-suite :: b :: test.txt' \
+# RUN: --xfail-not 'true-xfail.txt' \
+# RUN: --exclude-xfail \
+# RUN: %{inputs}/xfail-cl \
+# RUN: | FileCheck --check-prefixes=CHECK-EXCLUDED,CHECK-EXCLUDED-OVERRIDE %s
+
+
# RUN: env LIT_XFAIL='false.txt;false2.txt;top-level-suite :: b :: test.txt' \
# RUN: LIT_XFAIL_NOT='true-xfail.txt;top-level-suite :: a :: test-xfail.txt' \
# RUN: %{lit} %{inputs}/xfail-cl \
@@ -23,7 +35,7 @@
# END.
-# CHECK-FILTER: Testing: 10 tests, {{[0-9]*}} workers
+# CHECK-FILTER: Testing: 11 tests, {{[0-9]*}} workers
# CHECK-FILTER-DAG: {{^}}PASS: top-level-suite :: a :: test.txt
# CHECK-FILTER-DAG: {{^}}XFAIL: top-level-suite :: b :: test.txt
# CHECK-FILTER-DAG: {{^}}XFAIL: top-level-suite :: a :: false.txt
@@ -37,3 +49,17 @@
# CHECK-OVERRIDE: Testing: 1 tests, {{[0-9]*}} workers
# CHECK-OVERRIDE: {{^}}PASS: top-level-suite :: true-xfail.txt
+
+# CHECK-EXCLUDED: Testing: 11 tests, {{[0-9]*}} workers
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: a :: false.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: a :: test-xfail.txt
+# CHECK-EXCLUDED-DAG: {{^}}PASS: top-level-suite :: a :: test.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: b :: false.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: b :: test-xfail.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: b :: test.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: false.txt
+# CHECK-EXCLUDED-DAG: {{^}}EXCLUDED: top-level-suite :: false2.txt
+# CHECK-EXCLUDED-DAG: {{^}}PASS: top-level-suite :: true-xfail-conditionally.txt
+# CHECK-EXCLUDED-NOOVERRIDE-DAG: {{^}}EXCLUDED: top-level-suite :: true-xfail.txt
+# CHECK-EXCLUDED-OVERRIDE-DAG: {{^}}PASS: top-level-suite :: true-xfail.txt
+# CHECK-EXCLUDED-DAG: {{^}}PASS: top-level-suite :: true.txt
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index cf7596c..6e1baaf 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -196,6 +196,10 @@ def ConvertArithToSPIRVPass : Pass<"convert-arith-to-spirv"> {
"bool", /*default=*/"true",
"Emulate narrower scalar types with 32-bit ones if not supported by "
"the target">,
+ Option<"emulateUnsupportedFloatTypes", "emulate-unsupported-float-types",
+ "bool", /*default=*/"true",
+ "Emulate unsupported float types by representing them with integer "
+ "types of same bit width">
];
}
@@ -416,7 +420,11 @@ def ConvertControlFlowToSPIRVPass : Pass<"convert-cf-to-spirv"> {
Option<"emulateLT32BitScalarTypes", "emulate-lt-32-bit-scalar-types",
"bool", /*default=*/"true",
"Emulate narrower scalar types with 32-bit ones if not supported by"
- " the target">
+ " the target">,
+ Option<"emulateUnsupportedFloatTypes", "emulate-unsupported-float-types",
+ "bool", /*default=*/"true",
+ "Emulate unsupported float types by representing them with integer "
+ "types of same bit width">
];
}
@@ -500,7 +508,11 @@ def ConvertFuncToSPIRVPass : Pass<"convert-func-to-spirv"> {
Option<"emulateLT32BitScalarTypes", "emulate-lt-32-bit-scalar-types",
"bool", /*default=*/"true",
"Emulate narrower scalar types with 32-bit ones if not supported by"
- " the target">
+ " the target">,
+ Option<"emulateUnsupportedFloatTypes", "emulate-unsupported-float-types",
+ "bool", /*default=*/"true",
+ "Emulate unsupported float types by representing them with integer "
+ "types of same bit width">
];
}
@@ -1167,7 +1179,11 @@ def ConvertTensorToSPIRVPass : Pass<"convert-tensor-to-spirv"> {
Option<"emulateLT32BitScalarTypes", "emulate-lt-32-bit-scalar-types",
"bool", /*default=*/"true",
"Emulate narrower scalar types with 32-bit ones if not supported by"
- " the target">
+ " the target">,
+ Option<"emulateUnsupportedFloatTypes", "emulate-unsupported-float-types",
+ "bool", /*default=*/"true",
+ "Emulate unsupported float types by representing them with integer "
+ "types of same bit width">
];
}
diff --git a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
index a8455c2..b52f136 100644
--- a/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
+++ b/mlir/include/mlir/Dialect/Async/IR/AsyncOps.td
@@ -38,7 +38,8 @@ def Async_ExecuteOp :
["getEntrySuccessorOperands",
"areTypesCompatible"]>,
AttrSizedOperandSegments,
- AutomaticAllocationScope]> {
+ AutomaticAllocationScope,
+ RecursiveMemoryEffects]> {
let summary = "Asynchronous execute operation";
let description = [{
The `body` region attached to the `async.execute` operation semantically
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
index b5ea8fc..107bf3e 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
@@ -83,6 +83,9 @@ def LLVM_Dialect : Dialect {
return "llvm.emit_c_interface";
}
+ /// Name of the module-level assembly attribute.
+ static StringRef getModuleLevelAsmAttrName() { return "llvm.module_asm"; }
+
/// Name of the dependent libraries attribute.
static StringRef getDependentLibrariesAttrName() {
return "llvm.dependent_libraries";
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 45a8904..30df3b7 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -1990,10 +1990,30 @@ def NVVM_WMMAMmaOp : NVVM_Op<"wmma.mma">,
let hasVerifier = 1;
}
-def NVVM_StMatrixOp: NVVM_PTXBuilder_Op<"stmatrix">,
- Arguments<(ins LLVM_PointerShared:$ptr,
- Variadic<I32>:$sources,
- MMALayoutAttr:$layout)> {
+def LdStMatrixShapeAttr : NVVM_Attr<"LdStMatrixShape", "ld_st_matrix_shape"> {
+ let summary = "Matrix shape for ldmatrix and stmatrix";
+ let parameters = (ins "int":$m, "int":$n);
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+def LdStMatrixEltTypeB16 : I32EnumAttrCase<"B16", 0, "b16">;
+def LdStMatrixEltTypeB8 : I32EnumAttrCase<"B8", 1, "b8">;
+def LdStMatrixEltTypeB8X16_B6X16_P32 : I32EnumAttrCase<"B8X16_B6X16_P32", 2, "b8x16.b6x16_p32">;
+def LdStMatrixEltTypeB8X16_B4X16_P64 : I32EnumAttrCase<"B8X16_B4X16_P64", 3, "b8x16.b4x16_p64">;
+
+def LdStMatrixEltType : I32EnumAttr<"LdStMatrixEltType", "Element type for ldmatrix and stmatrix",
+ [LdStMatrixEltTypeB16, LdStMatrixEltTypeB8,
+ LdStMatrixEltTypeB8X16_B6X16_P32, LdStMatrixEltTypeB8X16_B4X16_P64]> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::NVVM";
+}
+def LdStMatrixEltTypeAttr : EnumAttr<NVVM_Dialect, LdStMatrixEltType, "ld_st_matrix_elt_type"> {
+ let assemblyFormat = "`<` $value `>`";
+}
+
+def NVVM_StMatrixOp: NVVM_Op<"stmatrix">,
+ Arguments<(ins LLVM_PointerShared: $ptr, Variadic<I32>:$sources, MMALayoutAttr:$layout,
+ LdStMatrixShapeAttr:$shape, LdStMatrixEltTypeAttr:$eltType)> {
let summary = "cooperative matrix store";
let description = [{
Collectively store one or more matrices across all threads in a warp to the
@@ -2001,21 +2021,12 @@ def NVVM_StMatrixOp: NVVM_PTXBuilder_Op<"stmatrix">,
[For more information, see PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-store-instruction-stmatrix)
}];
-
- let assemblyFormat = "$ptr `,` $sources attr-dict `:` type(operands)";
- let extraClassDefinition = [{
- std::string $cppClass::getPtx() {
- int d = getSources().size();
- std::string ptx = "stmatrix.sync.aligned";
- ptx += ".x" + std::to_string(d);
- if (getLayout() == NVVM::MMALayout::col)
- ptx += ".trans";
- if(d == 1) ptx += ".m8n8.shared.b16 [%0], {%1};";
- if(d == 2) ptx += ".m8n8.shared.b16 [%0], {%1, %2};";
- if(d == 4) ptx += ".m8n8.shared.b16 [%0], {%1, %2, %3, %4};";
- return ptx;
- }
+ string llvmBuilder = [{
+ auto operands = moduleTranslation.lookupValues(opInst.getOperands());
+ auto intId = getStMatrixIntrinsicId($layout, $sources.size(), $shape, $eltType);
+ createIntrinsicCall(builder, intId, operands, operands[0]->getType());
}];
+ let assemblyFormat = "$ptr `,` $sources attr-dict `:` type(operands)";
let hasVerifier = 1;
}
diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
index 2d15544..0c1c15b 100644
--- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
@@ -87,6 +87,9 @@ def ExecuteRegionOp : SCF_Op<"execute_region", [
be accessed inside the op. The op's region can have multiple blocks and the
blocks can have multiple distinct terminators. Values returned from this op's
region define the op's results.
+ The optional 'no_inline' attribute can be set to request that the
+ ExecuteRegionOp be preserved as much as possible and not inlined into the
+ parent block until an explicit lowering step.
Example:
@@ -98,6 +101,14 @@ def ExecuteRegionOp : SCF_Op<"execute_region", [
}
}
+ // The same as above, but with the no_inline attribute.
+ scf.for %i = 0 to 128 step %c1 {
+ %y = scf.execute_region -> i32 no_inline {
+ %x = load %A[%i] : memref<128xi32>
+ scf.yield %x : i32
+ }
+ }
+
affine.for %i = 0 to 100 {
"foo"() : () -> ()
%v = scf.execute_region -> i64 {
@@ -119,6 +130,10 @@ def ExecuteRegionOp : SCF_Op<"execute_region", [
```
}];
+ let arguments = (ins
+ UnitAttr:$no_inline
+ );
+
let results = (outs Variadic<AnyType>);
let regions = (region AnyRegion:$region);
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 9038326..9c74cff0 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -4448,6 +4448,7 @@ def SPIRV_OC_OpUMulExtended : I32EnumAttrCase<"OpUMulExtended"
def SPIRV_OC_OpSMulExtended : I32EnumAttrCase<"OpSMulExtended", 152>;
def SPIRV_OC_OpIsNan : I32EnumAttrCase<"OpIsNan", 156>;
def SPIRV_OC_OpIsInf : I32EnumAttrCase<"OpIsInf", 157>;
+def SPIRV_OC_OpIsFinite : I32EnumAttrCase<"OpIsFinite", 158>;
def SPIRV_OC_OpOrdered : I32EnumAttrCase<"OpOrdered", 162>;
def SPIRV_OC_OpUnordered : I32EnumAttrCase<"OpUnordered", 163>;
def SPIRV_OC_OpLogicalEqual : I32EnumAttrCase<"OpLogicalEqual", 164>;
@@ -4630,7 +4631,8 @@ def SPIRV_OpcodeAttr :
SPIRV_OC_OpVectorTimesMatrix, SPIRV_OC_OpMatrixTimesVector,
SPIRV_OC_OpMatrixTimesMatrix, SPIRV_OC_OpDot, SPIRV_OC_OpIAddCarry,
SPIRV_OC_OpISubBorrow, SPIRV_OC_OpUMulExtended, SPIRV_OC_OpSMulExtended,
- SPIRV_OC_OpIsNan, SPIRV_OC_OpIsInf, SPIRV_OC_OpOrdered, SPIRV_OC_OpUnordered,
+ SPIRV_OC_OpIsNan, SPIRV_OC_OpIsInf, SPIRV_OC_OpIsFinite,
+ SPIRV_OC_OpOrdered, SPIRV_OC_OpUnordered,
SPIRV_OC_OpLogicalEqual, SPIRV_OC_OpLogicalNotEqual, SPIRV_OC_OpLogicalOr,
SPIRV_OC_OpLogicalAnd, SPIRV_OC_OpLogicalNot, SPIRV_OC_OpSelect,
SPIRV_OC_OpIEqual, SPIRV_OC_OpINotEqual, SPIRV_OC_OpUGreaterThan,
diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
index ab535d7..9331fc5 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
@@ -403,6 +403,28 @@ def SPIRV_INotEqualOp : SPIRV_LogicalBinaryOp<"INotEqual",
// -----
+def SPIRV_IsFiniteOp : SPIRV_LogicalUnaryOp<"IsFinite", SPIRV_Float, []> {
+ let summary = "Result is true if x is an IEEE Finite, otherwise result is false";
+
+ let description = [{
+ Result Type must be a scalar or vector of Boolean type.
+
+ x must be a scalar or vector of floating-point type. It must have the
+ same number of components as Result Type.
+
+ Results are computed per component.
+
+ #### Example:
+
+ ```mlir
+ %2 = spirv.IsFinite %0: f32
+ %3 = spirv.IsFinite %1: vector<4xf32>
+ ```
+ }];
+}
+
+// -----
+
def SPIRV_IsInfOp : SPIRV_LogicalUnaryOp<"IsInf", SPIRV_Float, []> {
let summary = "Result is true if x is an IEEE Inf, otherwise result is false";
@@ -418,7 +440,7 @@ def SPIRV_IsInfOp : SPIRV_LogicalUnaryOp<"IsInf", SPIRV_Float, []> {
```mlir
%2 = spirv.IsInf %0: f32
- %3 = spirv.IsInf %1: vector<4xi32>
+ %3 = spirv.IsInf %1: vector<4xf32>
```
}];
}
@@ -442,7 +464,7 @@ def SPIRV_IsNanOp : SPIRV_LogicalUnaryOp<"IsNan", SPIRV_Float, []> {
```mlir
%2 = spirv.IsNan %0: f32
- %3 = spirv.IsNan %1: vector<4xi32>
+ %3 = spirv.IsNan %1: vector<4xf32>
```
}];
}
diff --git a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
index 3d22ec9..03ae54a 100644
--- a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
+++ b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
@@ -39,6 +39,10 @@ struct SPIRVConversionOptions {
/// The number of bits to store a boolean value.
unsigned boolNumBits{8};
+ /// Whether to emulate unsupported float types with integer types of the
+ /// same bit width.
+ bool emulateUnsupportedFloatTypes{true};
+
/// How sub-byte values are stored in memory.
SPIRVSubByteTypeStorage subByteTypeStorage{SPIRVSubByteTypeStorage::Packed};
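The new option is consumed by each of the SPIR-V lowering passes touched below; a minimal sketch of wiring it up by hand, mirroring the pass code in this patch:

```c++
#include "mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h"

using namespace mlir;

void configureConverter(spirv::TargetEnvAttr targetAttr) {
  SPIRVConversionOptions options;
  // Represent float types the target cannot handle (e.g. 8-bit floats)
  // with integer types of the same bit width.
  options.emulateUnsupportedFloatTypes = true;
  SPIRVTypeConverter typeConverter(targetAttr, options);
  // ... populate conversion patterns with `typeConverter`, as the
  // Arith/ControlFlow/Func/Tensor-to-SPIRV passes below do.
}
```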
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 3885439..5d45508 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -2595,6 +2595,7 @@ def Vector_MaskOp : Vector_Op<"mask", [
def Vector_TransposeOp :
Vector_Op<"transpose", [Pure,
+ DeclareOpInterfaceMethods<InferIntRangeInterface, ["inferResultRanges"]>,
DeclareOpInterfaceMethods<VectorUnrollOpInterface, ["getShapeForUnroll"]>,
PredOpTrait<"operand and result have same element type",
TCresVTEtIsSameAsOpBase<0, 0>>]> {
@@ -2876,7 +2877,10 @@ def Vector_ScanOp :
// VectorStepOp
//===----------------------------------------------------------------------===//
-def Vector_StepOp : Vector_Op<"step", [Pure]> {
+def Vector_StepOp : Vector_Op<"step", [
+ Pure,
+ DeclareOpInterfaceMethods<InferIntRangeInterface, ["inferResultRanges"]>
+ ]> {
let summary = "A linear sequence of values from 0 to N";
let description = [{
A `step` operation produces an index vector, i.e. a 1-D vector of values of
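Declaring `inferResultRanges` lets the integer range analysis reason about `vector.step`, whose result is by definition the values 0..N-1. The patch does not show the method bodies; a plausible sketch for the step case (an assumption, not the actual implementation) looks like:

```c++
// Assumed sketch, not the implementation from this patch: vector.step
// produces [0, 1, ..., N-1], so every lane lies in the unsigned range
// [0, N-1] for a fixed-length vector<Nxindex> result.
void StepOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
                               SetIntRangeFn setResultRanges) {
  auto resultType = cast<VectorType>(getResult().getType());
  if (resultType.isScalable())
    return; // The runtime length, and thus the upper bound, is unknown.
  unsigned bitwidth = ConstantIntRanges::getStorageBitwidth(resultType);
  APInt zero(bitwidth, 0);
  APInt high(bitwidth, resultType.getDimSize(0) - 1);
  setResultRanges(getResult(), ConstantIntRanges::fromUnsigned(zero, high));
}
```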
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
index 91d6b2a..75b16a87 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td
@@ -628,35 +628,71 @@ def XeGPU_PrefetchOp : XeGPU_Op<"prefetch", []> {
As compared to prefetch_nd, which works on non-scattered TensorDesc,
it works on scattered TensorDesc instead.
- Example:
+ Example 1:
```mlir
xegpu.prefetch %tdesc {l1_hint = #xegpu.cache_hint<cached>,
l2_hint = #xegpu.cache_hint<cached>,
l3_hint = #xegpu.cache_hint<cached>}
: !xegpu.tensor_desc<16xf16>
```
+
+ Example 2:
+ A variant accepts a memref as the base pointer and a vector of offsets instead of a
+ scattered TensorDesc. It combines "create scattered TensorDesc" and "prefetch with
+ scattered TensorDesc". The source operand may also be a raw pointer (uint64_t).
+ Please refer to create_tdesc for the restrictions on the memref.
+ ```mlir
+ %a = memref.alloc() : memref<1024xf32>
+ %0 = arith.constant dense<[0, 16, 32, 64]> : vector<4xindex>
+ xegpu.prefetch %a[%0] {l1_hint = #xegpu.cache_hint<cached>,
+ l2_hint = #xegpu.cache_hint<cached>,
+ l3_hint = #xegpu.cache_hint<cached>}
+ : memref<1024xf32>, vector<4xindex>
+ ```
}];
- let arguments = (ins XeGPU_TensorDesc: $TensorDesc,
+ let arguments = (ins XeGPU_GatherScatterSourceType: $source,
+ Optional<XeGPU_OffsetType>: $offsets,
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
let extraClassDeclaration = extraBaseClassDeclaration # [{
+ Type getSourceType() {
+ return getSource().getType();
+ }
+
+ TypedValue<xegpu::TensorDescType> getTensorDesc() {
+ if (auto tdescType = getTensorDescType()) {
+ return llvm::cast<TypedValue<xegpu::TensorDescType>>(getSource());
+ }
+ return TypedValue<xegpu::TensorDescType>();
+ }
+
xegpu::TensorDescType getTensorDescType() {
- return getTensorDesc().getType();
+ return dyn_cast<xegpu::TensorDescType>(getSourceType());
}
}];
- let assemblyFormat = "$TensorDesc prop-dict attr-dict `:` qualified(type($TensorDesc))";
+ let assemblyFormat = [{
+ $source
+ (`[` $offsets^ `]`)?
+ prop-dict
+ attr-dict `:` type(operands)
+ }];
+
+ let builders = [
+ OpBuilder<(ins "Value": $source,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
+ "xegpu::CachePolicyAttr": $l3_hint)>
+ ];
let hasVerifier = 1;
}
-def XeGPU_LoadGatherOp : XeGPU_Op<"load", [
- AllElementTypesMatch<["value", "TensorDesc"]>, MemoryEffects<[MemRead]>
- ]> {
+def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> {
let summary = "load a set of scattered data points from memory.";
let description = [{ It (aka. load) loads data per work-item. The output
@@ -687,6 +723,7 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [
: !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr<memory_space=global, chunk_size=8>>,
vector<16xi1> -> vector<16x8xf32>
```
+
Example 3 (SIMT mode):
```mlir
%2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint<cached>,
@@ -695,19 +732,48 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [
: !xegpu.tensor_desc<16x8xf32, #xegpu.scatter_tdesc_attr<memory_space=global, chunk_size=8>>
vector<16xi1> -> vector<8xf32>
```
+
+ Example 4:
+ A variant accepts a memref as the base pointer and a vector of offsets instead of a
+ scattered TensorDesc. It combines "create scattered TensorDesc" and "load with
+ scattered TensorDesc". The source operand may also be a raw pointer (uint64_t).
+ Please refer to create_tdesc for the restrictions on the memref.
+ ```mlir
+ %a = memref.alloc() : memref<1024xf32>
+ %offsets = vector.step : vector<16xindex>
+ %mask = vector.constant_mask [16]: vector<16xi1>
+ %val = xegpu.load %a[%offsets], %mask {l1_hint = #xegpu.cache_hint<cached>,
+ l2_hint = #xegpu.cache_hint<cached>,
+ l3_hint = #xegpu.cache_hint<cached>}
+ : memref<1024xf32>, vector<16xindex>, vector<16xi1> -> vector<16xf32>
+ ```
}];
- let arguments = (ins XeGPU_TensorDesc: $TensorDesc,
+ let arguments = (ins XeGPU_GatherScatterSourceType: $source,
+ Optional<XeGPU_OffsetType>: $offsets,
XeGPU_MaskType: $mask,
+ OptionalAttr<I64Attr>: $chunk_size,
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
let results = (outs XeGPU_ValueType: $value);
let extraClassDeclaration = extraBaseClassDeclaration # [{
+
+ Type getSourceType() {
+ return getSource().getType();
+ }
+
+ TypedValue<xegpu::TensorDescType> getTensorDesc() {
+ if (auto tdescType = getTensorDescType()) {
+ return llvm::cast<TypedValue<xegpu::TensorDescType>>(getSource());
+ }
+ return TypedValue<xegpu::TensorDescType>();
+ }
+
xegpu::TensorDescType getTensorDescType() {
- return getTensorDesc().getType();
+ return dyn_cast<xegpu::TensorDescType>(getSourceType());
}
mlir::Type getElementType() {
@@ -725,15 +791,24 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [
}];
- let assemblyFormat = [{$TensorDesc `,` $mask prop-dict attr-dict
- `:` qualified(type($TensorDesc)) `,` type($mask) `->` type($value)}];
+ let assemblyFormat = [{
+ $source
+ (`[` $offsets^ `]`)? `,`
+ $mask prop-dict
+ attr-dict `:` type(operands) `->` type($value)
+ }];
+
+ let builders = [
+ OpBuilder<(ins "Type": $value, "Value": $source, "Value": $mask,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
+ "xegpu::CachePolicyAttr": $l3_hint)>
+ ];
let hasVerifier = 1;
}
-def XeGPU_StoreScatterOp : XeGPU_Op<"store", [
- AllElementTypesMatch<["value", "TensorDesc"]>, MemoryEffects<[MemWrite]>
- ]> {
+def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> {
let summary = "store data to scattered memory locations.";
let description = [{ It (aka. store) stores data to scattered memory locations. The value is
typically a 1D vector. But when the chunk size of the TensorDesc is larger than 1, it will be
@@ -768,19 +843,49 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [
l3_hint = #xegpu.cache_hint<write_through>}>
: vector<8xf32>, !xegpu.tensor_desc<16x8xf32, #xegpu.scattered_tdesc_attr<chunk_size=8>> vector<16xi1>
```
+
+ Example 4:
+ A variant accepts a memref as the base pointer and a vector of offsets instead of a
+ scattered TensorDesc. It combines "create scattered TensorDesc" and "store with
+ scattered TensorDesc". The dest operand may also be a raw pointer (uint64_t).
+ Please refer to create_tdesc for the restrictions on the memref.
+ ```mlir
+ %a = memref.alloc() : memref<1024xf32>
+ %val = arith.constant dense<0.0> : vector<16xf32>
+ %offsets = vector.step : vector<16xindex>
+ %mask = vector.constant_mask [16]: vector<16xi1>
+ xegpu.store %val, %a[%offsets], %mask {l1_hint = #xegpu.cache_hint<cached>,
+ l2_hint = #xegpu.cache_hint<cached>,
+ l3_hint = #xegpu.cache_hint<cached>}
+ : vector<16xf32>, memref<1024xf32>, vector<16xindex>, vector<16xi1>
+ ```
+
}];
let arguments = (ins
XeGPU_ValueType: $value,
- XeGPU_TensorDesc: $TensorDesc,
+ XeGPU_GatherScatterSourceType: $dest,
+ Optional<XeGPU_OffsetType>: $offsets,
XeGPU_MaskType: $mask,
+ OptionalAttr<I64Attr>: $chunk_size,
OptionalAttr<XeGPU_CacheHintAttr>: $l1_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l2_hint,
OptionalAttr<XeGPU_CacheHintAttr>: $l3_hint);
let extraClassDeclaration = extraBaseClassDeclaration # [{
+ Type getDestType() {
+ return getDest().getType();
+ }
+
+ TypedValue<xegpu::TensorDescType> getTensorDesc() {
+ if (auto tdescType = getTensorDescType()) {
+ return llvm::cast<TypedValue<xegpu::TensorDescType>>(getDest());
+ }
+ return TypedValue<xegpu::TensorDescType>();
+ }
+
xegpu::TensorDescType getTensorDescType() {
- return getTensorDesc().getType();
+ return dyn_cast<xegpu::TensorDescType>(getDestType());
}
VectorType getValueType() {
@@ -792,8 +897,21 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [
}
}];
- let assemblyFormat = [{$value `,` $TensorDesc `,` $mask prop-dict attr-dict
- `:` type($value) `,` qualified(type($TensorDesc)) `,` type($mask)}];
+ let assemblyFormat = [{
+ $value `,`
+ $dest
+ (`[` $offsets^ `]`)? `,`
+ $mask
+ prop-dict
+ attr-dict `:` type(operands)
+ }];
+
+ let builders = [
+ OpBuilder<(ins "Value": $value, "Value": $dest, "Value": $mask,
+ "xegpu::CachePolicyAttr": $l1_hint,
+ "xegpu::CachePolicyAttr": $l2_hint,
+ "xegpu::CachePolicyAttr": $l3_hint)>
+ ];
let hasVerifier = 1;
}
diff --git a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td
index 20916ae..b268cab 100644
--- a/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td
+++ b/mlir/include/mlir/Dialect/XeGPU/IR/XeGPUTypes.td
@@ -189,6 +189,7 @@ def XeGPU_TensorDesc: XeGPUTypeDef<"TensorDesc", "tensor_desc",
let genVerifyDecl = 1;
}
def XeGPU_GatherScatterSourceType : AnyTypeOf<[XeGPU_TensorDesc, Non0RankedMemRefOf<[XeGPU_ScalarType]>, UI64]>;
def XeGPU_Nbarrier: XeGPUTypeDef<"Nbarrier", "nbarrier", [], "mlir::Type"> {
let summary = "!xegpu.nbarrier a custom XeGPU type representing a barrier.";
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index edc8ab4..4f89f8b 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -1125,6 +1125,26 @@ inline raw_ostream &operator<<(raw_ostream &os,
return os;
}
+/// A wrapper class for printing an operation with a custom AsmState. It acts
+/// as a "stream modifier" so that an operation can be printed through the
+/// operator<< overload with custom state, e.g.:
+///   AsmState state(op, OpPrintingFlags().skipRegions());
+///   llvm::dbgs() << OpWithState(op, state);
+class OpWithState {
+public:
+ OpWithState(Operation *op, AsmState &state) : op(op), theState(state) {}
+
+private:
+ Operation *op;
+ AsmState &theState;
+ friend raw_ostream &operator<<(raw_ostream &os, const OpWithState &op);
+};
+
+inline raw_ostream &operator<<(raw_ostream &os,
+ const OpWithState &opWithState) {
+ opWithState.op->print(os, const_cast<OpWithState &>(opWithState).theState);
+ return os;
+}
+
} // namespace mlir
namespace llvm {
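A short usage sketch of the new wrapper; the state is constructed once and can be reused across prints:

```c++
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Operation.h"
#include "llvm/Support/Debug.h"

using namespace mlir;

void dumpWithoutRegions(Operation *op) {
  // AsmState captures printing flags (and caches SSA name information),
  // so it can be reused when printing the same op several times.
  AsmState state(op, OpPrintingFlags().skipRegions());
  llvm::dbgs() << OpWithState(op, state) << "\n";
}
```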
diff --git a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
index b22ed60..09d819a 100644
--- a/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
+++ b/mlir/include/mlir/Target/LLVMIR/ModuleImport.h
@@ -83,6 +83,10 @@ public:
/// specification.
void convertTargetTriple();
+ /// Converts the module-level asm of the LLVM module to an MLIR
+ /// module-level asm specification.
+ void convertModuleLevelAsm();
+
/// Stores the mapping between an LLVM value and its MLIR counterpart.
void mapValue(llvm::Value *llvm, Value mlir) { mapValue(llvm) = mlir; }
diff --git a/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp b/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp
index d43e681..265293b 100644
--- a/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp
+++ b/mlir/lib/Conversion/ArithToSPIRV/ArithToSPIRV.cpp
@@ -99,6 +99,17 @@ static FloatAttr convertFloatAttr(FloatAttr srcAttr, FloatType dstType,
return builder.getF32FloatAttr(dstVal.convertToFloat());
}
+// Get an IntegerAttr from a FloatAttr while preserving the bits, which is
+// useful for converting float constants to integer constants.
+static IntegerAttr
+getIntegerAttrFromFloatAttr(FloatAttr floatAttr, Type dstType,
+ ConversionPatternRewriter &rewriter) {
+ APFloat floatVal = floatAttr.getValue();
+ APInt intVal = floatVal.bitcastToAPInt();
+ return rewriter.getIntegerAttr(dstType, intVal);
+}
+
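The helper relies on `APFloat::bitcastToAPInt`, which reinterprets the float's storage without any value conversion; a tiny self-contained illustration:

```c++
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include <cassert>

using namespace llvm;

void bitcastExample() {
  // 1.0f has the IEEE-754 single-precision bit pattern 0x3F800000;
  // bitcastToAPInt preserves exactly those bits as a 32-bit integer.
  APFloat One(1.0f);
  APInt Bits = One.bitcastToAPInt();
  assert(Bits.getZExtValue() == 0x3F800000u);
}
```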
/// Returns true if the given `type` is a boolean scalar or vector type.
static bool isBoolScalarOrVector(Type type) {
assert(type && "Not a valid type");
@@ -296,8 +307,18 @@ struct ConstantCompositeOpPattern final
SmallVector<Attribute, 8> elements;
if (isa<FloatType>(srcElemType)) {
for (FloatAttr srcAttr : dstElementsAttr.getValues<FloatAttr>()) {
- FloatAttr dstAttr =
- convertFloatAttr(srcAttr, cast<FloatType>(dstElemType), rewriter);
+ Attribute dstAttr = nullptr;
+ // Handle 8-bit float conversion to 8-bit integer.
+ auto *typeConverter = getTypeConverter<SPIRVTypeConverter>();
+ if (typeConverter->getOptions().emulateUnsupportedFloatTypes &&
+ srcElemType.getIntOrFloatBitWidth() == 8 &&
+ isa<IntegerType>(dstElemType)) {
+ dstAttr =
+ getIntegerAttrFromFloatAttr(srcAttr, dstElemType, rewriter);
+ } else {
+ dstAttr = convertFloatAttr(srcAttr, cast<FloatType>(dstElemType),
+ rewriter);
+ }
if (!dstAttr)
return failure();
elements.push_back(dstAttr);
@@ -361,11 +382,19 @@ struct ConstantScalarOpPattern final
// Floating-point types.
if (isa<FloatType>(srcType)) {
auto srcAttr = cast<FloatAttr>(cstAttr);
- auto dstAttr = srcAttr;
+ Attribute dstAttr = srcAttr;
// Floating-point types not supported in the target environment are all
// converted to float type.
- if (srcType != dstType) {
+ auto *typeConverter = getTypeConverter<SPIRVTypeConverter>();
+ if (typeConverter->getOptions().emulateUnsupportedFloatTypes &&
+ srcType.getIntOrFloatBitWidth() == 8 && isa<IntegerType>(dstType) &&
+ dstType.getIntOrFloatBitWidth() == 8) {
+ // If the source is an 8-bit float, convert it to an 8-bit integer.
+ dstAttr = getIntegerAttrFromFloatAttr(srcAttr, dstType, rewriter);
+ if (!dstAttr)
+ return failure();
+ } else if (srcType != dstType) {
dstAttr = convertFloatAttr(srcAttr, cast<FloatType>(dstType), rewriter);
if (!dstAttr)
return failure();
@@ -1352,6 +1381,7 @@ struct ConvertArithToSPIRVPass
SPIRVConversionOptions options;
options.emulateLT32BitScalarTypes = this->emulateLT32BitScalarTypes;
+ options.emulateUnsupportedFloatTypes = this->emulateUnsupportedFloatTypes;
SPIRVTypeConverter typeConverter(targetAttr, options);
// Use UnrealizedConversionCast as the bridge so that we don't need to pull
diff --git a/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRVPass.cpp b/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRVPass.cpp
index 03f4bf4..56b6181 100644
--- a/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRVPass.cpp
@@ -43,6 +43,7 @@ void ConvertControlFlowToSPIRVPass::runOnOperation() {
SPIRVConversionOptions options;
options.emulateLT32BitScalarTypes = this->emulateLT32BitScalarTypes;
+ options.emulateUnsupportedFloatTypes = this->emulateUnsupportedFloatTypes;
SPIRVTypeConverter typeConverter(targetAttr, options);
// TODO: We should also take care of block argument type conversion.
diff --git a/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRVPass.cpp b/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRVPass.cpp
index 8ed9f65..c0439a4 100644
--- a/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRVPass.cpp
@@ -42,6 +42,7 @@ void ConvertFuncToSPIRVPass::runOnOperation() {
SPIRVConversionOptions options;
options.emulateLT32BitScalarTypes = this->emulateLT32BitScalarTypes;
+ options.emulateUnsupportedFloatTypes = this->emulateUnsupportedFloatTypes;
SPIRVTypeConverter typeConverter(targetAttr, options);
RewritePatternSet patterns(context);
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
index 1817861..3545acb 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
@@ -385,6 +385,14 @@ LogicalResult GPUModuleConversion::matchAndRewrite(
if (auto attr = moduleOp->getAttrOfType<spirv::TargetEnvAttr>(
spirv::getTargetEnvAttrName()))
spvModule->setAttr(spirv::getTargetEnvAttrName(), attr);
+ if (ArrayAttr targets = moduleOp.getTargetsAttr()) {
+ for (Attribute targetAttr : targets)
+ if (auto spirvTargetEnvAttr =
+ dyn_cast<spirv::TargetEnvAttr>(targetAttr)) {
+ spvModule->setAttr(spirv::getTargetEnvAttrName(), spirvTargetEnvAttr);
+ break;
+ }
+ }
rewriter.eraseOp(moduleOp);
return success();
diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
index a344f88..5eab057 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
@@ -48,9 +48,36 @@ struct GPUToSPIRVPass final : impl::ConvertGPUToSPIRVBase<GPUToSPIRVPass> {
void runOnOperation() override;
private:
+ /// Queries the target environment from 'targets' attribute of the given
+ /// `moduleOp`.
+ spirv::TargetEnvAttr lookupTargetEnvInTargets(gpu::GPUModuleOp moduleOp);
+
+ /// Queries the target environment from 'targets' attribute of the given
+ /// `moduleOp` or returns target environment as returned by
+ /// `spirv::lookupTargetEnvOrDefault` if not provided by 'targets'.
+ spirv::TargetEnvAttr lookupTargetEnvOrDefault(gpu::GPUModuleOp moduleOp);
bool mapMemorySpace;
};
+spirv::TargetEnvAttr
+GPUToSPIRVPass::lookupTargetEnvInTargets(gpu::GPUModuleOp moduleOp) {
+ if (ArrayAttr targets = moduleOp.getTargetsAttr()) {
+ for (Attribute targetAttr : targets)
+ if (auto spirvTargetEnvAttr = dyn_cast<spirv::TargetEnvAttr>(targetAttr))
+ return spirvTargetEnvAttr;
+ }
+
+ return {};
+}
+
+spirv::TargetEnvAttr
+GPUToSPIRVPass::lookupTargetEnvOrDefault(gpu::GPUModuleOp moduleOp) {
+ if (spirv::TargetEnvAttr targetEnvAttr = lookupTargetEnvInTargets(moduleOp))
+ return targetEnvAttr;
+
+ return spirv::lookupTargetEnvOrDefault(moduleOp);
+}
+
void GPUToSPIRVPass::runOnOperation() {
MLIRContext *context = &getContext();
ModuleOp module = getOperation();
@@ -58,9 +85,8 @@ void GPUToSPIRVPass::runOnOperation() {
SmallVector<Operation *, 1> gpuModules;
OpBuilder builder(context);
- auto targetEnvSupportsKernelCapability = [](gpu::GPUModuleOp moduleOp) {
- Operation *gpuModule = moduleOp.getOperation();
- auto targetAttr = spirv::lookupTargetEnvOrDefault(gpuModule);
+ auto targetEnvSupportsKernelCapability = [this](gpu::GPUModuleOp moduleOp) {
+ auto targetAttr = lookupTargetEnvOrDefault(moduleOp);
spirv::TargetEnv targetEnv(targetAttr);
return targetEnv.allows(spirv::Capability::Kernel);
};
@@ -86,7 +112,7 @@ void GPUToSPIRVPass::runOnOperation() {
// TargetEnv attributes.
for (Operation *gpuModule : gpuModules) {
spirv::TargetEnvAttr targetAttr =
- spirv::lookupTargetEnvOrDefault(gpuModule);
+ lookupTargetEnvOrDefault(cast<gpu::GPUModuleOp>(gpuModule));
// Map MemRef memory space to SPIR-V storage class first if requested.
if (mapMemorySpace) {
diff --git a/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp b/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp
index a877ad2..1787e0a 100644
--- a/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp
+++ b/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp
@@ -488,7 +488,12 @@ namespace mlir {
void populateMathToSPIRVPatterns(const SPIRVTypeConverter &typeConverter,
RewritePatternSet &patterns) {
// Core patterns
- patterns.add<CopySignPattern>(typeConverter, patterns.getContext());
+ patterns
+ .add<CopySignPattern,
+ CheckedElementwiseOpPattern<math::IsInfOp, spirv::IsInfOp>,
+ CheckedElementwiseOpPattern<math::IsNaNOp, spirv::IsNanOp>,
+ CheckedElementwiseOpPattern<math::IsFiniteOp, spirv::IsFiniteOp>>(
+ typeConverter, patterns.getContext());
// GLSL patterns
patterns
diff --git a/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRVPass.cpp b/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRVPass.cpp
index f07386e..8cd650e 100644
--- a/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRVPass.cpp
@@ -41,6 +41,7 @@ class ConvertTensorToSPIRVPass
SPIRVConversionOptions options;
options.emulateLT32BitScalarTypes = this->emulateLT32BitScalarTypes;
+ options.emulateUnsupportedFloatTypes = this->emulateUnsupportedFloatTypes;
SPIRVTypeConverter typeConverter(targetAttr, options);
RewritePatternSet patterns(context);
diff --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index 52cd0ce..e0977f5 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -813,15 +813,26 @@ LogicalResult NVVM::LdMatrixOp::verify() {
}
LogicalResult NVVM::StMatrixOp::verify() {
- unsigned addressSpace =
- llvm::cast<LLVM::LLVMPointerType>(getPtr().getType()).getAddressSpace();
- if (addressSpace != NVVM::kSharedMemorySpace)
- return emitOpError("expected source pointer in memory space 3");
-
int numMatrix = getSources().size();
if (numMatrix != 1 && numMatrix != 2 && numMatrix != 4)
return emitOpError("expected num attribute to be 1, 2 or 4");
+ int m = getShape().getM(), n = getShape().getN();
+ if (m == 8 && n == 8) {
+ if (getEltType() != NVVM::LdStMatrixEltType::B16) {
+ return emitOpError("expected element type to be B16 for 8x8 matrix");
+ }
+ } else if (m == 16 && n == 8) {
+ if (getEltType() != NVVM::LdStMatrixEltType::B8) {
+ return emitOpError("expected element type to be B8 for 16x8 matrix");
+ }
+ if (getLayout() != NVVM::MMALayout::col) {
+ return emitOpError("expected layout to be col for 16x8 matrix");
+ }
+ } else {
+ return emitOpError("expected shape to be 8x8 or 16x8");
+ }
+
return success();
}
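The verifier above accepts exactly two stmatrix configurations; the predicate below is a hypothetical restatement for readability, not part of the patch:

    // 8x8  -> element type b16, row or col layout, num in {1, 2, 4}
    // 16x8 -> element type b8, col layout only,    num in {1, 2, 4}
    static bool isValidStMatrixConfig(int m, int n, int num,
                                      NVVM::LdStMatrixEltType elt,
                                      NVVM::MMALayout layout) {
      if (num != 1 && num != 2 && num != 4)
        return false;
      if (m == 8 && n == 8)
        return elt == NVVM::LdStMatrixEltType::B16;
      if (m == 16 && n == 8)
        return elt == NVVM::LdStMatrixEltType::B8 &&
               layout == NVVM::MMALayout::col;
      return false;
    }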
diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index 7f9ba1b..bf66ed0 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -637,6 +637,7 @@ struct DropPadUnitDims : public OpRewritePattern<tensor::PadOp> {
}
ArrayRef<int64_t> sourceShape = padOp.getSourceType().getShape();
+ ArrayRef<int64_t> resultShape = padOp.getResultType().getShape();
int64_t padRank = sourceShape.size();
auto isStaticZero = [](OpFoldResult f) {
@@ -647,16 +648,18 @@ struct DropPadUnitDims : public OpRewritePattern<tensor::PadOp> {
allowedUnitDims.end());
llvm::SmallDenseSet<unsigned> unitDims;
SmallVector<int64_t> newShape;
+ SmallVector<int64_t> newResultShape;
SmallVector<OpFoldResult> newLowPad;
SmallVector<OpFoldResult> newHighPad;
- for (const auto [dim, size, low, high] :
- zip_equal(llvm::seq(static_cast<int64_t>(0), padRank), sourceShape,
- padOp.getMixedLowPad(), padOp.getMixedHighPad())) {
+ for (const auto [dim, size, outSize, low, high] : zip_equal(
+ llvm::seq(static_cast<int64_t>(0), padRank), sourceShape,
+ resultShape, padOp.getMixedLowPad(), padOp.getMixedHighPad())) {
if (unitDimsFilter.contains(dim) && size == 1 && isStaticZero(low) &&
isStaticZero(high)) {
unitDims.insert(dim);
} else {
newShape.push_back(size);
+ newResultShape.push_back(outSize);
newLowPad.push_back(low);
newHighPad.push_back(high);
}
@@ -686,8 +689,10 @@ struct DropPadUnitDims : public OpRewritePattern<tensor::PadOp> {
collapseValue(rewriter, padOp.getLoc(), padOp.getSource(), newShape,
reassociationMap, options.rankReductionStrategy);
- auto newPadOp = tensor::PadOp::create(
- rewriter, padOp.getLoc(), /*result=*/Type(), collapsedSource, newLowPad,
+ auto newResultType = RankedTensorType::get(
+ newResultShape, padOp.getResultType().getElementType());
+ auto newPadOp = rewriter.create<tensor::PadOp>(
+ padOp.getLoc(), /*result=*/newResultType, collapsedSource, newLowPad,
newHighPad, paddingVal, padOp.getNofold());
Value dest = padOp.getResult();
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 793eec7..0860cea 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1831,6 +1831,53 @@ vectorizeAsTensorPackOp(RewriterBase &rewriter, linalg::PackOp packOp,
return success();
}
+/// Given the re-associations, "collapses" the input Vector type.
+///
+/// This is similar to CollapseShapeOp::inferCollapsedType with two notable
+/// differences:
+/// * We can safely assume that there are no dynamic sizes.
+/// * Scalable flags are updated alongside regular dims.
+///
+/// When collapsing scalable flags, this conservatively disallows inputs with
+/// two or more scalable dims (see the assert below); we could revisit this.
+///
+/// EXAMPLE:
+/// type = vector<4x16x[8]x16xf32>
+/// reassociation = [(d0, d1, d2, d3) -> (d0, d1),
+/// (d0, d1, d2, d3) -> (d2, d3)]
+/// Result:
+/// vector<64x[128]xf32>
+static VectorType getCollapsedVecType(VectorType type,
+ ArrayRef<AffineMap> reassociation) {
+ assert(type.getNumScalableDims() < 2 &&
+ "Collapsing more than 1 scalable dim is not supported ATM");
+
+ // Use the fact that reassociation is valid to simplify the logic: only use
+ // each map's rank.
+ assert(isReassociationValid(reassociation) && "invalid reassociation");
+
+ auto shape = type.getShape();
+ auto scalableFlags = type.getScalableDims();
+ SmallVector<int64_t> newShape;
+ SmallVector<bool> newScalableFlags;
+
+ unsigned currentDim = 0;
+ for (AffineMap m : reassociation) {
+ unsigned dim = m.getNumResults();
+ int64_t size = 1;
+ bool flag = false;
+ for (unsigned d = 0; d < dim; ++d) {
+ size *= shape[currentDim + d];
+ flag |= scalableFlags[currentDim + d];
+ }
+ newShape.push_back(size);
+ newScalableFlags.push_back(flag);
+ currentDim += dim;
+ }
+
+ return VectorType::get(newShape, type.getElementType(), newScalableFlags);
+}
+
/// Vectorize a `linalg::UnPackOp` to these 4 Ops:
/// Vector::TransferReadOp - Reads a vector from the source tensor
/// vector::TransposeOp - Transpose the Source tensor
@@ -1928,30 +1975,18 @@ vectorizeAsTensorUnpackOp(RewriterBase &rewriter, linalg::UnPackOp unpackOp,
PackingMetadata packMetadata;
SmallVector<int64_t> lastDimToInsertPosPerm =
getUnPackInverseSrcPerm(unpackOp, packMetadata);
- ShapedType maskedOpShapedType = cast<ShapedType>(readResult.getType());
- SmallVector<int64_t> stripMineShape(maskedOpShapedType.getShape());
- mlir::Type stripMineElemType = maskedOpShapedType.getElementType();
- applyPermutationToVector(stripMineShape, lastDimToInsertPosPerm);
- RankedTensorType stripMineTensorType =
- RankedTensorType::get(stripMineShape, stripMineElemType);
// Transpose the appropriate rows to match output.
vector::TransposeOp transposeOp = vector::TransposeOp::create(
rewriter, loc, readResult, lastDimToInsertPosPerm);
// Collapse the vector to the size required by result.
- RankedTensorType collapsedType = tensor::CollapseShapeOp::inferCollapsedType(
- stripMineTensorType, packMetadata.reassociations);
- mlir::VectorType vecCollapsedType =
- VectorType::get(collapsedType.getShape(), collapsedType.getElementType());
+ VectorType collapsedVecType = getCollapsedVecType(
+ transposeOp.getType(),
+ getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
+ rewriter.getContext(), packMetadata.reassociations)));
vector::ShapeCastOp shapeCastOp = vector::ShapeCastOp::create(
- rewriter, loc, vecCollapsedType, transposeOp->getResult(0));
-
- // writeVectorSizes had to match the shapecast shape for dynamic sizes,
- // otherwise the validator complains that the mask size is invalid.
- SmallVector<int64_t> writeVectorSizes(
- unpackOp.getDestType().hasStaticShape()
- ? vectorSizes
- : shapeCastOp.getResultVectorType().getShape());
+ rewriter, loc, collapsedVecType, transposeOp->getResult(0));
+
Operation *write = createWriteOrMaskedWrite(
rewriter, loc, shapeCastOp.getResult(), unpackOp.getDest(),
/*writeIndices=*/{}, useInBoundsInsteadOfMasking);
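To make the scalable-dim handling in `getCollapsedVecType` concrete, here is a minimal usage sketch matching its doc comment's example (surrounding setup is assumed; the helper composition is the same one used in the hunk above):

    MLIRContext ctx;
    Builder b(&ctx);
    // vector<4x16x[8]x16xf32>: dim 2 is scalable.
    auto src = VectorType::get({4, 16, 8, 16}, b.getF32Type(),
                               {false, false, true, false});
    SmallVector<ReassociationIndices> reassoc = {{0, 1}, {2, 3}};
    auto maps = getSymbolLessAffineMaps(
        convertReassociationIndicesToExprs(&ctx, reassoc));
    // 4*16 -> 64 (fixed); [8]*16 -> [128] (the scalable flag survives).
    VectorType dst = getCollapsedVecType(src, maps);
    // dst == vector<64x[128]xf32>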
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index 759e58b..0262a1b 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -137,6 +137,9 @@ ParseResult ExecuteRegionOp::parse(OpAsmParser &parser,
if (parser.parseOptionalArrowTypeList(result.types))
return failure();
+ if (succeeded(parser.parseOptionalKeyword("no_inline")))
+ result.addAttribute("no_inline", parser.getBuilder().getUnitAttr());
+
// Introduce the body region and parse it.
Region *body = result.addRegion();
if (parser.parseRegion(*body, /*arguments=*/{}, /*argTypes=*/{}) ||
@@ -148,8 +151,9 @@ ParseResult ExecuteRegionOp::parse(OpAsmParser &parser,
void ExecuteRegionOp::print(OpAsmPrinter &p) {
p.printOptionalArrowTypeList(getResultTypes());
-
p << ' ';
+ if (getNoInline())
+ p << "no_inline ";
p.printRegion(getRegion(),
/*printEntryBlockArgs=*/false,
/*printBlockTerminators=*/true);
@@ -184,7 +188,7 @@ struct SingleBlockExecuteInliner : public OpRewritePattern<ExecuteRegionOp> {
LogicalResult matchAndRewrite(ExecuteRegionOp op,
PatternRewriter &rewriter) const override {
- if (!op.getRegion().hasOneBlock())
+ if (!op.getRegion().hasOneBlock() || op.getNoInline())
return failure();
replaceOpWithRegion(rewriter, op, op.getRegion());
return success();
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index 35ec019..8f4c4cc 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -182,6 +182,14 @@ getTypeNumBytes(const SPIRVConversionOptions &options, Type type) {
return bitWidth / 8;
}
+ // Handle 8-bit floats.
+ if (options.emulateUnsupportedFloatTypes && isa<FloatType>(type)) {
+ auto bitWidth = type.getIntOrFloatBitWidth();
+ if (bitWidth == 8)
+ return bitWidth / 8;
+ return std::nullopt;
+ }
+
if (auto complexType = dyn_cast<ComplexType>(type)) {
auto elementSize = getTypeNumBytes(options, complexType.getElementType());
if (!elementSize)
@@ -318,6 +326,44 @@ static Type convertSubByteIntegerType(const SPIRVConversionOptions &options,
type.getSignedness());
}
+/// Converts 8-bit float types to integer types with the same bit width.
+/// Returns nullptr for unsupported 8-bit float types.
+static Type convert8BitFloatType(const SPIRVConversionOptions &options,
+ FloatType type) {
+ if (!options.emulateUnsupportedFloatTypes)
+ return nullptr;
+ // F8 types are converted to integer types with the same bit width.
+ if (isa<Float8E5M2Type, Float8E4M3Type, Float8E4M3FNType, Float8E5M2FNUZType,
+ Float8E4M3FNUZType, Float8E4M3B11FNUZType, Float8E3M4Type,
+ Float8E8M0FNUType>(type))
+ return IntegerType::get(type.getContext(), type.getWidth());
+ LLVM_DEBUG(llvm::dbgs() << "unsupported 8-bit float type: " << type << "\n");
+ return nullptr;
+}
+
+/// Returns a type with the same shape but with any 8-bit float element type
+/// converted to an integer type of the same bit width. This is a no-op when
+/// the element type is not an 8-bit float type or when emulation is disabled.
+static ShapedType
+convertShaped8BitFloatType(ShapedType type,
+ const SPIRVConversionOptions &options) {
+ if (!options.emulateUnsupportedFloatTypes)
+ return type;
+ Type srcElementType = type.getElementType();
+ Type convertedElementType = nullptr;
+ // F8 types are converted to integer types with the same bit width.
+ if (isa<Float8E5M2Type, Float8E4M3Type, Float8E4M3FNType, Float8E5M2FNUZType,
+ Float8E4M3FNUZType, Float8E4M3B11FNUZType, Float8E3M4Type,
+ Float8E8M0FNUType>(srcElementType))
+ convertedElementType = IntegerType::get(
+ type.getContext(), srcElementType.getIntOrFloatBitWidth());
+
+ if (!convertedElementType)
+ return type;
+
+ return type.clone(convertedElementType);
+}
+
/// Returns a type with the same shape but with any index element type converted
/// to the matching integer type. This is a noop when the element type is not
/// the index type.
@@ -337,6 +383,7 @@ convertVectorType(const spirv::TargetEnv &targetEnv,
const SPIRVConversionOptions &options, VectorType type,
std::optional<spirv::StorageClass> storageClass = {}) {
type = cast<VectorType>(convertIndexElementType(type, options));
+ type = cast<VectorType>(convertShaped8BitFloatType(type, options));
auto scalarType = dyn_cast_or_null<spirv::ScalarType>(type.getElementType());
if (!scalarType) {
// If this is not a spec allowed scalar type, try to handle sub-byte integer
@@ -433,6 +480,7 @@ static Type convertTensorType(const spirv::TargetEnv &targetEnv,
}
type = cast<TensorType>(convertIndexElementType(type, options));
+ type = cast<TensorType>(convertShaped8BitFloatType(type, options));
auto scalarType = dyn_cast_or_null<spirv::ScalarType>(type.getElementType());
if (!scalarType) {
LLVM_DEBUG(llvm::dbgs()
@@ -596,6 +644,10 @@ static Type convertMemrefType(const spirv::TargetEnv &targetEnv,
} else if (auto indexType = dyn_cast<IndexType>(elementType)) {
type = cast<MemRefType>(convertIndexElementType(type, options));
arrayElemType = type.getElementType();
+ } else if (auto floatType = dyn_cast<FloatType>(elementType)) {
+    // Handle 8-bit float types.
+ type = cast<MemRefType>(convertShaped8BitFloatType(type, options));
+ arrayElemType = type.getElementType();
} else {
LLVM_DEBUG(
llvm::dbgs()
@@ -1444,6 +1496,8 @@ SPIRVTypeConverter::SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr,
addConversion([this](FloatType floatType) -> std::optional<Type> {
if (auto scalarType = dyn_cast<spirv::ScalarType>(floatType))
return convertScalarType(this->targetEnv, this->options, scalarType);
+ if (floatType.getWidth() == 8)
+ return convert8BitFloatType(this->options, floatType);
return Type();
});
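Taken together, the SPIRVConversion changes emulate every supported 8-bit float as i8, for scalars, vectors, tensors, and memrefs alike, whenever `emulateUnsupportedFloatTypes` is set. A condensed sketch of the scalar rule, simplified from the code above (in the real code the type must additionally be one of the listed Float8* classes; all eight are 8 bits wide):

    static Type emulateF8(FloatType type, const SPIRVConversionOptions &options) {
      // No emulation requested, or not an 8-bit float: no conversion.
      if (!options.emulateUnsupportedFloatTypes || type.getWidth() != 8)
        return nullptr;
      return IntegerType::get(type.getContext(), 8);
    }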
diff --git a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
index 6d2cbb5..e3cba388 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp
@@ -452,18 +452,14 @@ struct ClampIsNoOp : public OpRewritePattern<tosa::ClampOp> {
auto inputType = llvm::dyn_cast<RankedTensorType>(op.getInput().getType());
auto inputElementType = inputType.getElementType();
- if (!inputType.hasStaticShape()) {
- return failure();
- }
-
if (isa<FloatType>(inputElementType)) {
// Unlike integer types, floating point types can represent infinity.
- auto minClamp =
+ const auto minClamp =
llvm::cast<mlir::FloatAttr>(op.getMinValAttr()).getValue();
- auto maxClamp =
+ const auto maxClamp =
llvm::cast<mlir::FloatAttr>(op.getMaxValAttr()).getValue();
- bool isMin = minClamp.isNegInfinity();
- bool isMax = maxClamp.isInfinity();
+ const bool isMin = minClamp.isNegInfinity();
+ const bool isMax = maxClamp.isInfinity();
if (isMin && isMax) {
rewriter.replaceOp(op, input);
@@ -472,18 +468,19 @@ struct ClampIsNoOp : public OpRewritePattern<tosa::ClampOp> {
return failure();
}
- if (inputElementType.isUnsignedInteger()) {
- int64_t minClamp =
- llvm::cast<mlir::IntegerAttr>(op.getMinValAttr()).getUInt();
- int64_t maxClamp =
- llvm::cast<mlir::IntegerAttr>(op.getMaxValAttr()).getUInt();
+ // i1 types are boolean in TOSA
+ const bool isBoolean = inputElementType.isInteger(1);
+ if (inputElementType.isUnsignedInteger() || isBoolean) {
+ const int64_t minClamp = llvm::cast<mlir::IntegerAttr>(op.getMinValAttr())
+ .getValue()
+ .getZExtValue();
+ const int64_t maxClamp = llvm::cast<mlir::IntegerAttr>(op.getMaxValAttr())
+ .getValue()
+ .getZExtValue();
- int64_t intMin =
- APInt::getMinValue(inputElementType.getIntOrFloatBitWidth())
- .getZExtValue();
- int64_t intMax =
- APInt::getMaxValue(inputElementType.getIntOrFloatBitWidth())
- .getZExtValue();
+ const unsigned bitWidth = inputElementType.getIntOrFloatBitWidth();
+ const int64_t intMin = APInt::getMinValue(bitWidth).getZExtValue();
+ const int64_t intMax = APInt::getMaxValue(bitWidth).getZExtValue();
if (minClamp <= intMin && maxClamp >= intMax) {
rewriter.replaceOp(op, input);
@@ -493,17 +490,14 @@ struct ClampIsNoOp : public OpRewritePattern<tosa::ClampOp> {
}
if (llvm::isa<IntegerType>(inputElementType)) {
- int64_t minClamp =
+ const int64_t minClamp =
llvm::cast<mlir::IntegerAttr>(op.getMinValAttr()).getInt();
- int64_t maxClamp =
+ const int64_t maxClamp =
llvm::cast<mlir::IntegerAttr>(op.getMaxValAttr()).getInt();
- int64_t intMin =
- APInt::getSignedMinValue(inputElementType.getIntOrFloatBitWidth())
- .getSExtValue();
- int64_t intMax =
- APInt::getSignedMaxValue(inputElementType.getIntOrFloatBitWidth())
- .getSExtValue();
+ const unsigned bitWidth = inputElementType.getIntOrFloatBitWidth();
+ const int64_t intMin = APInt::getSignedMinValue(bitWidth).getSExtValue();
+ const int64_t intMax = APInt::getSignedMaxValue(bitWidth).getSExtValue();
if (minClamp <= intMin && maxClamp >= intMax) {
rewriter.replaceOp(op, input);
diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
index 8789f55..a21b5ba 100644
--- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -5916,14 +5916,13 @@ OpFoldResult ShapeCastOp::fold(FoldAdaptor adaptor) {
}
// shape_cast(constant) -> constant
- if (auto splatAttr =
- llvm::dyn_cast_if_present<SplatElementsAttr>(adaptor.getSource()))
- return splatAttr.reshape(getType());
+ if (auto denseAttr =
+ dyn_cast_if_present<DenseElementsAttr>(adaptor.getSource()))
+ return denseAttr.reshape(getType());
// shape_cast(poison) -> poison
- if (llvm::dyn_cast_if_present<ub::PoisonAttr>(adaptor.getSource())) {
+ if (llvm::dyn_cast_if_present<ub::PoisonAttr>(adaptor.getSource()))
return ub::PoisonAttr::get(getContext());
- }
return {};
}
@@ -6316,6 +6315,11 @@ std::optional<SmallVector<int64_t, 4>> TransposeOp::getShapeForUnroll() {
return llvm::to_vector<4>(getResultVectorType().getShape());
}
+void TransposeOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
+ SetIntRangeFn setResultRanges) {
+ setResultRanges(getResult(), argRanges.front());
+}
+
namespace {
// Rewrites two back-to-back TransposeOp operations into a single TransposeOp.
@@ -7198,6 +7202,23 @@ Value mlir::vector::makeArithReduction(OpBuilder &b, Location loc,
}
//===----------------------------------------------------------------------===//
+// StepOp
+//===----------------------------------------------------------------------===//
+
+void StepOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
+ SetIntRangeFn setResultRanges) {
+ auto resultType = cast<VectorType>(getType());
+ if (resultType.isScalable()) {
+ return;
+ }
+ unsigned bitwidth = ConstantIntRanges::getStorageBitwidth(resultType);
+ APInt zero(bitwidth, 0);
+ APInt high(bitwidth, resultType.getDimSize(0) - 1);
+ ConstantIntRanges result = {zero, high, zero, high};
+ setResultRanges(getResult(), result);
+}
+
+//===----------------------------------------------------------------------===//
// Vector Masking Utilities
//===----------------------------------------------------------------------===//
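A worked instance of the new `StepOp::inferResultRanges`: `vector.step : vector<8xindex>` produces lanes 0, 1, ..., 7, so the inferred range is [0, 7] in both the unsigned and signed domains; scalable vectors are skipped because the lane count is unknown at compile time. In isolation:

    // For vector<8xindex> (index uses the storage bitwidth, 64):
    unsigned bw = ConstantIntRanges::getStorageBitwidth(resultType); // 64
    APInt zero(bw, 0);
    APInt high(bw, /*getDimSize(0) - 1=*/7);
    // {umin, umax, smin, smax} == {0, 7, 0, 7}
    ConstantIntRanges range = {zero, high, zero, high};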
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp
index cb8e566..dedc3b3 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorBroadcast.cpp
@@ -28,7 +28,10 @@ using namespace mlir;
using namespace mlir::vector;
namespace {
-/// Progressive lowering of BroadcastOp.
+
+/// Convert a vector.broadcast with a vector operand to a lower-rank
+/// vector.broadcast. A vector.broadcast with a scalar operand is expected to
+/// be directly convertible to the target dialect (LLVM, SPIR-V, etc.).
class BroadcastOpLowering : public OpRewritePattern<vector::BroadcastOp> {
public:
using OpRewritePattern::OpRewritePattern;
@@ -40,20 +43,23 @@ public:
VectorType srcType = dyn_cast<VectorType>(op.getSourceType());
Type eltType = dstType.getElementType();
- // Scalar to any vector can use splat.
- if (!srcType) {
- rewriter.replaceOpWithNewOp<vector::SplatOp>(op, dstType, op.getSource());
- return success();
- }
+ // A broadcast from a scalar is considered to be in the lowered form.
+ if (!srcType)
+ return rewriter.notifyMatchFailure(
+ op, "broadcast from scalar already in lowered form");
// Determine rank of source and destination.
int64_t srcRank = srcType.getRank();
int64_t dstRank = dstType.getRank();
- // Stretching scalar inside vector (e.g. vector<1xf32>) can use splat.
+ // Here we are broadcasting to a rank-1 vector. Ensure that the source is a
+ // scalar.
if (srcRank <= 1 && dstRank == 1) {
- Value ext = vector::ExtractOp::create(rewriter, loc, op.getSource());
- rewriter.replaceOpWithNewOp<vector::SplatOp>(op, dstType, ext);
+ SmallVector<int64_t> fullRankPosition(srcRank, 0);
+ Value ext = vector::ExtractOp::create(rewriter, loc, op.getSource(),
+ fullRankPosition);
+ assert(!isa<VectorType>(ext.getType()) && "expected scalar");
+ rewriter.replaceOpWithNewOp<vector::BroadcastOp>(op, dstType, ext);
return success();
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index 4baeb11..2cf8f0b 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -468,7 +468,7 @@ struct TransferReadToVectorLoadLowering
read, "vector type is not rank 1, can't create masked load, needs "
"VectorToSCF");
- Value fill = vector::SplatOp::create(
+ Value fill = vector::BroadcastOp::create(
rewriter, read.getLoc(), unbroadcastedVectorType, read.getPadding());
res = vector::MaskedLoadOp::create(
rewriter, read.getLoc(), unbroadcastedVectorType, read.getBase(),
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp
index 72352d7..cbb9d4b 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorInsertExtractStridedSliceRewritePatterns.cpp
@@ -303,7 +303,7 @@ public:
// Extract/insert on a lower ranked extract strided slice op.
Value zero = arith::ConstantOp::create(rewriter, loc, elemType,
rewriter.getZeroAttr(elemType));
- Value res = SplatOp::create(rewriter, loc, dstType, zero);
+ Value res = BroadcastOp::create(rewriter, loc, dstType, zero);
for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
off += stride, ++idx) {
Value one = ExtractOp::create(rewriter, loc, op.getVector(), off);
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 7500bf7..2269a40 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -939,7 +939,7 @@ public:
Value zero = arith::ConstantOp::create(rewriter, loc, elemType,
rewriter.getZeroAttr(elemType));
- Value res = SplatOp::create(rewriter, loc, castDstType, zero);
+ Value res = BroadcastOp::create(rewriter, loc, castDstType, zero);
SmallVector<int64_t> sliceShape = {castDstLastDim};
SmallVector<int64_t> strides = {1};
@@ -987,6 +987,23 @@ static Type cloneOrReplace(Type type, Type newElementType) {
return newElementType;
}
+/// If `value` is the result of a splat or broadcast operation, return the input
+/// of the splat/broadcast operation.
+static Value getBroadcastLikeSource(Value value) {
+
+ Operation *op = value.getDefiningOp();
+ if (!op)
+ return {};
+
+ if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
+ return broadcast.getSource();
+
+ if (auto splat = dyn_cast<vector::SplatOp>(op))
+ return splat.getInput();
+
+ return {};
+}
+
/// Reorders elementwise(broadcast/splat) to broadcast(elementwise). Ex:
///
/// Example:
@@ -1026,39 +1043,37 @@ struct ReorderElementwiseOpsOnBroadcast final
}
Type resultElemType = resultType.getElementType();
+
// Get the type of the first non-constant operand
- Operation *firstBroadcastOrSplat = nullptr;
+ Value splatSource;
for (Value operand : op->getOperands()) {
Operation *definingOp = operand.getDefiningOp();
if (!definingOp)
return failure();
if (definingOp->hasTrait<OpTrait::ConstantLike>())
continue;
- if (!isa<vector::BroadcastOp, vector::SplatOp>(*definingOp))
- return failure();
- firstBroadcastOrSplat = definingOp;
+ splatSource = getBroadcastLikeSource(operand);
break;
}
- if (!firstBroadcastOrSplat)
+ if (!splatSource)
return failure();
- Type unbroadcastResultType = cloneOrReplace(
- firstBroadcastOrSplat->getOperand(0).getType(), resultElemType);
+ Type unbroadcastResultType =
+ cloneOrReplace(splatSource.getType(), resultElemType);
// Make sure that all operands are broadcast from identically-shaped types:
// * scalar (`vector.broadcast` + `vector.splat`), or
// * vector (`vector.broadcast`).
// Otherwise the re-ordering wouldn't be safe.
- if (!llvm::all_of(op->getOperands(), [&unbroadcastResultType](Value val) {
- if (auto bcastOp = val.getDefiningOp<vector::BroadcastOp>())
- return haveSameShapeAndScaling(bcastOp.getOperand().getType(),
- unbroadcastResultType);
- if (auto splatOp = val.getDefiningOp<vector::SplatOp>())
- return haveSameShapeAndScaling(splatOp.getOperand().getType(),
- unbroadcastResultType);
+ if (!llvm::all_of(op->getOperands(), [splatSource](Value val) {
+ if (auto source = getBroadcastLikeSource(val))
+ return haveSameShapeAndScaling(source.getType(),
+ splatSource.getType());
SplatElementsAttr splatConst;
return matchPattern(val, m_Constant(&splatConst));
})) {
- return failure();
+ return rewriter.notifyMatchFailure(
+ op,
+ "not all operands are constants or broadcasts from the same type");
}
// Collect the source values before broadcasting
@@ -1287,15 +1302,17 @@ public:
return rewriter.notifyMatchFailure(
op, "only 1-element vectors are supported");
- Operation *splat = op.getValueToStore().getDefiningOp();
- if (!isa_and_present<vector::BroadcastOp, vector::SplatOp>(splat))
- return rewriter.notifyMatchFailure(op, "neither a splat nor a broadcast");
+ Value toStore = op.getValueToStore();
+ Value source = getBroadcastLikeSource(toStore);
+ if (!source)
+ return rewriter.notifyMatchFailure(
+ op, "value to store is not from a broadcast");
// Checking for single use so we can remove splat.
+ Operation *splat = toStore.getDefiningOp();
if (!splat->hasOneUse())
return rewriter.notifyMatchFailure(op, "expected single op use");
- Value source = splat->getOperand(0);
Value base = op.getBase();
ValueRange indices = op.getIndices();
@@ -1345,13 +1362,13 @@ static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
// Add in an offset if requested.
if (off) {
Value o = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, *off);
- Value ov = vector::SplatOp::create(rewriter, loc, indices.getType(), o);
+ Value ov = vector::BroadcastOp::create(rewriter, loc, indices.getType(), o);
indices = arith::AddIOp::create(rewriter, loc, ov, indices);
}
// Construct the vector comparison.
Value bound = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, b);
Value bounds =
- vector::SplatOp::create(rewriter, loc, indices.getType(), bound);
+ vector::BroadcastOp::create(rewriter, loc, indices.getType(), bound);
return arith::CmpIOp::create(rewriter, loc, arith::CmpIPredicate::slt,
indices, bounds);
}
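The `getBroadcastLikeSource` helper introduced above lets all three patterns in this file treat `vector.broadcast` and `vector.splat` uniformly. Illustrative behaviour (values hypothetical):

    // %b = vector.broadcast %x : f32 to vector<4xf32>
    //   getBroadcastLikeSource(%b) == %x
    // %s = vector.splat %y : vector<4xf32>
    //   getBroadcastLikeSource(%s) == %y
    // %a = arith.addf %p, %q : vector<4xf32>
    //   getBroadcastLikeSource(%a) == Value()  // null: callers bail out
    if (Value src = getBroadcastLikeSource(operand))
      /* rewrite against the unbroadcast source type */;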
diff --git a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
index 704deea..33450f3 100644
--- a/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
+++ b/mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp
@@ -110,6 +110,34 @@ isValidGatherScatterParams(Type maskTy, VectorType valueTy,
return success();
}
+static LogicalResult
+isValidGatherScatterBufferParams(Type maskTy, VectorType valueTy,
+ int64_t chunkSize,
+ function_ref<InFlightDiagnostic()> emitError) {
+
+ if (!valueTy)
+ return emitError() << "Expecting a vector type result.";
+
+ auto maskShape = getShapeOf(maskTy);
+ auto valueShape = getShapeOf(valueTy);
+
+  // A valid shape for the SIMT case.
+ if (valueTy.getRank() == 1) {
+ if (valueTy.getNumElements() != chunkSize)
+ return emitError() << "value elements must match chunk size " << chunkSize
+ << " for SIMT code.";
+ return success();
+ }
+
+ llvm::SmallVector<int64_t> expectedMaskShape(valueShape);
+ if (chunkSize > 1)
+ expectedMaskShape.pop_back();
+ if (expectedMaskShape != maskShape)
+ return emitError() << "Mask should match value except the chunk size dim.";
+
+ return success();
+}
+
//===----------------------------------------------------------------------===//
// XeGPU_CreateNdDescOp
//===----------------------------------------------------------------------===//
@@ -644,9 +672,14 @@ LogicalResult CreateDescOp::verify() {
//===----------------------------------------------------------------------===//
LogicalResult PrefetchOp::verify() {
auto tdescTy = getTensorDescType();
- if (!tdescTy.isScattered())
+
+ if (tdescTy && !tdescTy.isScattered())
return emitOpError("Expects a scattered TensorDesc.\n");
+ if (!tdescTy && getRankOf(getSource()) > 1)
+    return emitOpError(
+        "Expecting the source to be a 1D memref or pointer (uint64_t).");
+
if (!isReadHintOrNone(getL1HintAttr()))
return emitOpError("invalid l1_hint: ") << getL1HintAttr();
@@ -659,6 +692,13 @@ LogicalResult PrefetchOp::verify() {
return success();
}
+void PrefetchOp::build(OpBuilder &builder, OperationState &state, Value source,
+ xegpu::CachePolicyAttr l1_hint,
+ xegpu::CachePolicyAttr l2_hint,
+ xegpu::CachePolicyAttr l3_hint) {
+ build(builder, state, source, Value(), l1_hint, l2_hint, l3_hint);
+}
+
//===----------------------------------------------------------------------===//
// XeGPU_LoadGatherOp
//===----------------------------------------------------------------------===//
@@ -667,6 +707,13 @@ LogicalResult LoadGatherOp::verify() {
auto maskTy = getMaskType();
auto valueTy = getValueType();
+ if (tdescTy && !tdescTy.isScattered())
+ return emitOpError("Expects a scattered TensorDesc.");
+
+ if (!tdescTy && getRankOf(getSource()) > 1)
+    return emitOpError(
+        "Expecting the source to be a 1D memref or pointer (uint64_t).");
+
if (!isReadHintOrNone(getL1HintAttr()))
return emitOpError("invalid l1_hint: ") << getL1HintAttr();
@@ -676,8 +723,27 @@ LogicalResult LoadGatherOp::verify() {
if (!isReadHintOrNone(getL3HintAttr()))
return emitOpError("invalid l3_hint: ") << getL3HintAttr();
- return isValidGatherScatterParams(maskTy, valueTy, tdescTy,
- [&]() { return emitOpError(); });
+ if (tdescTy)
+ return isValidGatherScatterParams(maskTy, valueTy, tdescTy,
+ [&]() { return emitOpError(); });
+ auto srcTy = getSourceType();
+  int64_t chunkSize = static_cast<int64_t>(getChunkSize().value_or(1));
+ auto memTy = dyn_cast<MemRefType>(srcTy);
+
+ if (memTy && (valueTy.getElementType() != memTy.getElementType()))
+ return emitError() << "Value should have the same element type as MemRef.";
+
+ return isValidGatherScatterBufferParams(maskTy, valueTy, chunkSize,
+ [&]() { return emitOpError(); });
+}
+
+void LoadGatherOp::build(OpBuilder &builder, OperationState &state,
+ Type valueType, Value source, Value mask,
+ xegpu::CachePolicyAttr l1_hint,
+ xegpu::CachePolicyAttr l2_hint,
+ xegpu::CachePolicyAttr l3_hint) {
+ build(builder, state, valueType, source, Value(), mask, IntegerAttr(),
+ l1_hint, l2_hint, l3_hint);
}
//===----------------------------------------------------------------------===//
@@ -688,6 +754,13 @@ LogicalResult StoreScatterOp::verify() {
auto maskTy = getMaskType();
auto valueTy = getValueType();
+ if (tdescTy && !tdescTy.isScattered())
+    return emitOpError("Expects a scattered TensorDesc.");
+
+ if (!tdescTy && getRankOf(getDest()) > 1)
+    return emitOpError(
+        "Expecting the dest to be a 1D memref or pointer (uint64_t).");
+
if (!isWriteHintOrNone(getL1HintAttr()))
return emitOpError("invalid l1_hint: ") << getL1HintAttr();
@@ -697,8 +770,28 @@ LogicalResult StoreScatterOp::verify() {
if (!isWriteHintOrNone(getL3HintAttr()))
return emitOpError("invalid l3_hint: ") << getL3HintAttr();
- return isValidGatherScatterParams(maskTy, valueTy, tdescTy,
- [&]() { return emitOpError(); });
+ if (tdescTy)
+ return isValidGatherScatterParams(maskTy, valueTy, tdescTy,
+ [&]() { return emitOpError(); });
+
+ auto destTy = getDestType();
+  int64_t chunkSize = static_cast<int64_t>(getChunkSize().value_or(1));
+ auto memTy = dyn_cast<MemRefType>(destTy);
+
+ if (memTy && (valueTy.getElementType() != memTy.getElementType()))
+ return emitError() << "Value should have the same element type as MemRef.";
+
+ return isValidGatherScatterBufferParams(maskTy, valueTy, chunkSize,
+ [&]() { return emitOpError(); });
+}
+
+void StoreScatterOp::build(OpBuilder &builder, OperationState &state,
+ Value value, Value dest, Value mask,
+ xegpu::CachePolicyAttr l1_hint,
+ xegpu::CachePolicyAttr l2_hint,
+ xegpu::CachePolicyAttr l3_hint) {
+ build(builder, state, value, dest, Value(), mask, IntegerAttr(), l1_hint,
+ l2_hint, l3_hint);
}
//===----------------------------------------------------------------------===//
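Concrete instances of the rule enforced by `isValidGatherScatterBufferParams` for the new TensorDesc-less forms (shapes illustrative): with chunkSize == 8, a value of vector<16x8xf32> requires a mask of vector<16xi1> (the chunk dim is dropped); with chunkSize == 1, mask and value shapes must match exactly; a rank-1 value is the SIMT form, where only its element count must equal the chunk size. The expected mask shape reduces to:

    // Hypothetical helper mirroring the verifier's shape computation.
    SmallVector<int64_t> expectedMaskShape(ArrayRef<int64_t> valueShape,
                                           int64_t chunkSize) {
      SmallVector<int64_t> mask(valueShape.begin(), valueShape.end());
      if (chunkSize > 1)
        mask.pop_back(); // e.g. value 16x8, chunkSize 8 -> mask 16
      return mask;
    }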
diff --git a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
index ec8fad4..c793b71 100644
--- a/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
+++ b/mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp
@@ -481,7 +481,8 @@ struct UnrollLoadGatherOp : public UnrollPattern<xegpu::LoadGatherOp> {
VectorType valueTy = llvm::dyn_cast<VectorType>(op.getValue().getType());
xegpu::TensorDescType tdescTy = op.getTensorDescType();
- if (!tdescTy.isScattered())
+    // TODO: handle the unstructured source case (!tdescTy)
+ if (!tdescTy || op.getOffsets())
return failure();
std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
@@ -543,7 +544,8 @@ struct UnrollPrefetchOp : public UnrollPattern<xegpu::PrefetchOp> {
Location loc = op.getLoc();
xegpu::TensorDescType tdescTy = op.getTensorDescType();
- if (!tdescTy.isScattered())
+    // TODO: handle the unstructured source case (!tdescTy)
+ if (!tdescTy || op.getOffsets())
return failure();
std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
@@ -572,7 +574,8 @@ struct UnrollStoreScatterOp : public UnrollPattern<xegpu::StoreScatterOp> {
VectorType valueTy = llvm::dyn_cast<VectorType>(op.getValue().getType());
xegpu::TensorDescType tdescTy = op.getTensorDescType();
- if (!tdescTy.isScattered())
+    // TODO: handle the unstructured source case (!tdescTy)
+ if (!tdescTy || op.getOffsets())
return failure();
std::optional<SmallVector<int64_t>> targetShape = getTargetShape(op);
diff --git a/mlir/lib/Parser/Parser.cpp b/mlir/lib/Parser/Parser.cpp
index e9b5e92..310680b 100644
--- a/mlir/lib/Parser/Parser.cpp
+++ b/mlir/lib/Parser/Parser.cpp
@@ -17,14 +17,32 @@
using namespace mlir;
+static std::pair<int64_t, int64_t>
+getLineAndColStart(const llvm::SourceMgr &sourceMgr) {
+ unsigned lastFileID = sourceMgr.getNumBuffers();
+ if (lastFileID == 1)
+ return {0, 0};
+
+ auto bufferID = sourceMgr.getMainFileID();
+ const llvm::MemoryBuffer *main = sourceMgr.getMemoryBuffer(bufferID);
+ const llvm::MemoryBuffer *last = sourceMgr.getMemoryBuffer(lastFileID);
+  // Only handle a last buffer strictly nested inside the main buffer.
+ if (main->getBufferStart() < last->getBufferStart() &&
+ main->getBufferEnd() >= last->getBufferEnd()) {
+ return sourceMgr.getLineAndColumn(
+ llvm::SMLoc::getFromPointer(last->getBufferStart()), bufferID);
+ }
+ return {0, 0};
+}
+
LogicalResult mlir::parseSourceFile(const llvm::SourceMgr &sourceMgr,
Block *block, const ParserConfig &config,
LocationAttr *sourceFileLoc) {
const auto *sourceBuf = sourceMgr.getMemoryBuffer(sourceMgr.getMainFileID());
if (sourceFileLoc) {
- *sourceFileLoc = FileLineColLoc::get(config.getContext(),
- sourceBuf->getBufferIdentifier(),
- /*line=*/0, /*column=*/0);
+ auto [line, column] = getLineAndColStart(sourceMgr);
+ *sourceFileLoc = FileLineColLoc::get(
+ config.getContext(), sourceBuf->getBufferIdentifier(), line, column);
}
if (isBytecode(*sourceBuf))
return readBytecodeFile(*sourceBuf, block, config);
@@ -37,9 +55,9 @@ mlir::parseSourceFile(const std::shared_ptr<llvm::SourceMgr> &sourceMgr,
const auto *sourceBuf =
sourceMgr->getMemoryBuffer(sourceMgr->getMainFileID());
if (sourceFileLoc) {
- *sourceFileLoc = FileLineColLoc::get(config.getContext(),
- sourceBuf->getBufferIdentifier(),
- /*line=*/0, /*column=*/0);
+ auto [line, column] = getLineAndColStart(*sourceMgr);
+ *sourceFileLoc = FileLineColLoc::get(
+ config.getContext(), sourceBuf->getBufferIdentifier(), line, column);
}
if (isBytecode(*sourceBuf))
return readBytecodeFile(sourceMgr, block, config);
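Effect of the `getLineAndColStart` change: when the source manager holds more than one buffer and the buffer being parsed is strictly nested inside the main file (the -split-input-file case), the synthesized FileLineColLoc now points at the chunk's real start instead of line 0, column 0. Illustrative only:

    // file.mlir, second split chunk begins at line 12:
    //   before: loc("file.mlir":0:0)
    //   after:  loc("file.mlir":12:1)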
diff --git a/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp
index b3577c6..90462d1 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.cpp
@@ -164,6 +164,42 @@ static llvm::Intrinsic::ID getLdMatrixIntrinsicId(NVVM::MMALayout layout,
}
}
+/// Return the intrinsic ID associated with stmatrix for the given parameters.
+static llvm::Intrinsic::ID
+getStMatrixIntrinsicId(NVVM::MMALayout layout, int32_t num,
+ NVVM::LdStMatrixShapeAttr shape,
+ NVVM::LdStMatrixEltType eltType) {
+ if (shape.getM() == 8 && shape.getN() == 8) {
+ switch (num) {
+ case 1:
+ return (layout == NVVM::MMALayout::row)
+ ? llvm::Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16
+ : llvm::Intrinsic::
+ nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16;
+ case 2:
+ return (layout == NVVM::MMALayout::row)
+ ? llvm::Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16
+ : llvm::Intrinsic::
+ nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16;
+ case 4:
+ return (layout == NVVM::MMALayout::row)
+ ? llvm::Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16
+ : llvm::Intrinsic::
+ nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16;
+ }
+ } else if (shape.getM() == 16 && shape.getN() == 8) {
+ switch (num) {
+ case 1:
+ return llvm::Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8;
+ case 2:
+ return llvm::Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8;
+ case 4:
+ return llvm::Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8;
+ }
+ }
+ llvm_unreachable("unknown stmatrix kind");
+}
+
/// Return the intrinsic ID associated with st.bulk for the given address type.
static llvm::Intrinsic::ID
getStBulkIntrinsicId(LLVM::LLVMPointerType addrType) {
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index a207cce..6325480 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -30,6 +30,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/Constants.h"
@@ -1063,6 +1064,18 @@ void ModuleImport::convertTargetTriple() {
builder.getStringAttr(llvmModule->getTargetTriple().str()));
}
+void ModuleImport::convertModuleLevelAsm() {
+ llvm::StringRef asmStr = llvmModule->getModuleInlineAsm();
+ llvm::SmallVector<mlir::Attribute> asmArrayAttr;
+
+ for (llvm::StringRef line : llvm::split(asmStr, '\n'))
+ if (!line.empty())
+ asmArrayAttr.push_back(builder.getStringAttr(line));
+
+ mlirModule->setAttr(LLVM::LLVMDialect::getModuleLevelAsmAttrName(),
+ builder.getArrayAttr(asmArrayAttr));
+}
+
LogicalResult ModuleImport::convertFunctions() {
for (llvm::Function &func : llvmModule->functions())
if (failed(processFunction(&func)))
@@ -3195,5 +3208,6 @@ OwningOpRef<ModuleOp> mlir::translateLLVMIRToModule(
if (failed(moduleImport.convertIFuncs()))
return {};
moduleImport.convertTargetTriple();
+ moduleImport.convertModuleLevelAsm();
return module;
}
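The import side splits the module-level inline asm on newlines and drops empty lines; the export side in ModuleTranslation.cpp below appends each entry back, so the asm round-trips modulo blank lines. A standalone sketch of the split step:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringExtras.h"

    llvm::StringRef asmStr = "\n.globl foo\nfoo: ret\n";
    llvm::SmallVector<llvm::StringRef> lines;
    for (llvm::StringRef line : llvm::split(asmStr, '\n'))
      if (!line.empty())
        lines.push_back(line); // {".globl foo", "foo: ret"}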
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 2685b5c9..b3a06e2 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -2318,6 +2318,25 @@ prepareLLVMModule(Operation *m, llvm::LLVMContext &llvmContext,
llvmModule->setTargetTriple(
llvm::Triple(cast<StringAttr>(targetTripleAttr).getValue()));
+ if (auto asmAttr = m->getDiscardableAttr(
+ LLVM::LLVMDialect::getModuleLevelAsmAttrName())) {
+ auto asmArrayAttr = dyn_cast<ArrayAttr>(asmAttr);
+ if (!asmArrayAttr) {
+ m->emitError("expected an array attribute for a module level asm");
+ return nullptr;
+ }
+
+ for (Attribute elt : asmArrayAttr) {
+ auto asmStrAttr = dyn_cast<StringAttr>(elt);
+ if (!asmStrAttr) {
+ m->emitError(
+ "expected a string attribute for each entry of a module level asm");
+ return nullptr;
+ }
+ llvmModule->appendModuleInlineAsm(asmStrAttr.getValue());
+ }
+ }
+
return llvmModule;
}
diff --git a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
index bdcdaa4..de714d8b 100644
--- a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
+++ b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
@@ -501,8 +501,7 @@ performActions(raw_ostream &os,
<< "bytecode version while not emitting bytecode";
AsmState asmState(op.get(), OpPrintingFlags(), /*locationMap=*/nullptr,
&fallbackResourceMap);
- op.get()->print(os, asmState);
- os << '\n';
+ os << OpWithState(op.get(), asmState) << '\n';
return success();
}
diff --git a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
index 1abe0fd..6e2352e 100644
--- a/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
+++ b/mlir/test/Conversion/ArithToSPIRV/arith-to-spirv.mlir
@@ -559,6 +559,23 @@ func.func @constant() {
return
}
+// CHECK-LABEL: @constant_8bit_float
+func.func @constant_8bit_float() {
+ // CHECK: spirv.Constant 56 : i8
+ %cst = arith.constant 1.0 : f8E4M3
+ // CHECK: spirv.Constant 56 : i8
+ %cst_i8 = arith.bitcast %cst : f8E4M3 to i8
+ // CHECK: spirv.Constant dense<56> : vector<4xi8>
+ %cst_vector = arith.constant dense<1.0> : vector<4xf8E4M3>
+ // CHECK: spirv.Constant dense<56> : vector<4xi8>
+ %cst_vector_i8 = arith.bitcast %cst_vector : vector<4xf8E4M3> to vector<4xi8>
+ // CHECK: spirv.Constant dense<60> : tensor<4xi8> : !spirv.array<4 x i8>
+ %cst_tensor = arith.constant dense<1.0> : tensor<4xf8E5M2>
+ // CHECK: spirv.Constant dense<60> : tensor<4xi8> : !spirv.array<4 x i8>
+ %cst_tensor_i8 = arith.bitcast %cst_tensor : tensor<4xf8E5M2> to tensor<4xi8>
+ return
+}
+
// CHECK-LABEL: @constant_16bit
func.func @constant_16bit() {
// CHECK: spirv.Constant 4 : i16
diff --git a/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir b/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
index 1737f4a..0c77c88 100644
--- a/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
+++ b/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
@@ -1,6 +1,8 @@
// RUN: mlir-opt -split-input-file -convert-func-to-spirv %s -o - | FileCheck %s
// RUN: mlir-opt -split-input-file -convert-func-to-spirv="emulate-lt-32-bit-scalar-types=false" %s | \
// RUN: FileCheck %s --check-prefix=NOEMU
+// RUN: mlir-opt -split-input-file -convert-func-to-spirv="emulate-unsupported-float-types=false" %s | \
+// RUN: FileCheck %s --check-prefix=UNSUPPORTED_FLOAT
//===----------------------------------------------------------------------===//
// Integer types
@@ -944,3 +946,55 @@ func.func @unranked_tensor(%arg0: tensor<*xi32>) { return }
func.func @dynamic_dim_tensor(%arg0: tensor<8x?xi32>) { return }
} // end module
+
+
+// -----
+
+// Check that 8-bit float types are emulated as i8.
+module attributes {
+ spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Int8], []>, #spirv.resource_limits<>>
+} {
+
+ // CHECK: spirv.func @float8_to_integer8
+ // CHECK-SAME: (%arg0: i8
+ // CHECK-SAME: %arg1: i8
+ // CHECK-SAME: %arg2: i8
+ // CHECK-SAME: %arg3: i8
+ // CHECK-SAME: %arg4: i8
+ // CHECK-SAME: %arg5: i8
+ // CHECK-SAME: %arg6: i8
+ // CHECK-SAME: %arg7: i8
+ // CHECK-SAME: %arg8: vector<4xi8>
+ // CHECK-SAME: %arg9: !spirv.ptr<!spirv.struct<(!spirv.array<8 x i8, stride=1> [0])>, StorageBuffer>
+ // CHECK-SAME: %arg10: !spirv.array<4 x i8>
+ // UNSUPPORTED_FLOAT-LABEL: func.func @float8_to_integer8
+ // UNSUPPORTED_FLOAT-SAME: (%arg0: f8E5M2
+ // UNSUPPORTED_FLOAT-SAME: %arg1: f8E4M3
+ // UNSUPPORTED_FLOAT-SAME: %arg2: f8E4M3FN
+ // UNSUPPORTED_FLOAT-SAME: %arg3: f8E5M2FNUZ
+ // UNSUPPORTED_FLOAT-SAME: %arg4: f8E4M3FNUZ
+ // UNSUPPORTED_FLOAT-SAME: %arg5: f8E4M3B11FNUZ
+ // UNSUPPORTED_FLOAT-SAME: %arg6: f8E3M4
+ // UNSUPPORTED_FLOAT-SAME: %arg7: f8E8M0FNU
+ // UNSUPPORTED_FLOAT-SAME: %arg8: vector<4xf8E4M3B11FNUZ>
+ // UNSUPPORTED_FLOAT-SAME: %arg9: memref<8xf8E4M3, #spirv.storage_class<StorageBuffer>>
+ // UNSUPPORTED_FLOAT-SAME: %arg10: tensor<4xf8E5M2>
+ // UNSUPPORTED_FLOAT-SAME: ) {
+
+ func.func @float8_to_integer8(
+ %arg0: f8E5M2, // CHECK-NOT: f8E5M2
+ %arg1: f8E4M3, // CHECK-NOT: f8E4M3
+ %arg2: f8E4M3FN, // CHECK-NOT: f8E4M3FN
+ %arg3: f8E5M2FNUZ, // CHECK-NOT: f8E5M2FNUZ
+ %arg4: f8E4M3FNUZ, // CHECK-NOT: f8E4M3FNUZ
+ %arg5: f8E4M3B11FNUZ, // CHECK-NOT: f8E4M3B11FNUZ
+ %arg6: f8E3M4, // CHECK-NOT: f8E3M4
+ %arg7: f8E8M0FNU, // CHECK-NOT: f8E8M0FNU
+ %arg8: vector<4xf8E4M3B11FNUZ>, // CHECK-NOT: vector<4xf8E4M3B11FNUZ>
+ %arg9: memref<8xf8E4M3, #spirv.storage_class<StorageBuffer>>, // CHECK-NOT: memref
+ %arg10: tensor<4xf8E5M2> // CHECK-NOT: tensor
+ ) {
+ // CHECK: spirv.Return
+ return
+ }
+}
diff --git a/mlir/test/Conversion/GPUToSPIRV/lookup-target-env.mlir b/mlir/test/Conversion/GPUToSPIRV/lookup-target-env.mlir
new file mode 100644
index 0000000..983747b
--- /dev/null
+++ b/mlir/test/Conversion/GPUToSPIRV/lookup-target-env.mlir
@@ -0,0 +1,40 @@
+// RUN: mlir-opt --split-input-file --convert-gpu-to-spirv %s | FileCheck %s
+
+module attributes {gpu.container_module} {
+ // CHECK-LABEL: spirv.module @{{.*}} GLSL450
+ gpu.module @kernels [#spirv.target_env<#spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>] {
+ // CHECK: spirv.func @load_kernel
+ // CHECK-SAME: %[[ARG:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 0)>})
+ gpu.func @load_kernel(%arg0: memref<12x4xf32>) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
+ %c0 = arith.constant 0 : index
+ // CHECK: %[[PTR:.*]] = spirv.AccessChain %[[ARG]]{{\[}}{{%.*}}, {{%.*}}{{\]}}
+ // CHECK-NEXT: {{%.*}} = spirv.Load "StorageBuffer" %[[PTR]] : f32
+ %0 = memref.load %arg0[%c0, %c0] : memref<12x4xf32>
+ // CHECK: spirv.Return
+ gpu.return
+ }
+ }
+}
+
+// -----
+// Checks that the `-convert-gpu-to-spirv` pass selects the first
+// `spirv.target_env` from the `targets` array attribute attached to `gpu.module`.
+module attributes {gpu.container_module} {
+ // CHECK-LABEL: spirv.module @{{.*}} GLSL450
+ // CHECK-SAME: #spirv.target_env<#spirv.vce<v1.4, [Shader], [SPV_KHR_storage_buffer_storage_class]>
+ gpu.module @kernels [
+ #spirv.target_env<#spirv.vce<v1.4, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>,
+ #spirv.target_env<#spirv.vce<v1.0, [Kernel], []>, #spirv.resource_limits<>>,
+ #spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>>] {
+ // CHECK: spirv.func @load_kernel
+ // CHECK-SAME: %[[ARG:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 0)>})
+ gpu.func @load_kernel(%arg0: memref<12x4xf32>) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
+ %c0 = arith.constant 0 : index
+ // CHECK: %[[PTR:.*]] = spirv.AccessChain %[[ARG]]{{\[}}{{%.*}}, {{%.*}}{{\]}}
+ // CHECK-NEXT: {{%.*}} = spirv.Load "StorageBuffer" %[[PTR]] : f32
+ %0 = memref.load %arg0[%c0, %c0] : memref<12x4xf32>
+ // CHECK: spirv.Return
+ gpu.return
+ }
+ }
+}
diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-fpclassify-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-fpclassify-spirv.mlir
new file mode 100644
index 0000000..3e5f592
--- /dev/null
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-fpclassify-spirv.mlir
@@ -0,0 +1,27 @@
+// RUN: mlir-opt --convert-math-to-spirv %s | FileCheck %s
+
+module attributes {
+ spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>>
+} {
+
+ // CHECK-LABEL: @fpclassify
+ func.func @fpclassify(%x: f32, %v: vector<4xf32>) {
+ // CHECK: spirv.IsFinite %{{.*}} : f32
+ %0 = math.isfinite %x : f32
+ // CHECK: spirv.IsFinite %{{.*}} : vector<4xf32>
+ %1 = math.isfinite %v : vector<4xf32>
+
+ // CHECK: spirv.IsNan %{{.*}} : f32
+ %2 = math.isnan %x : f32
+ // CHECK: spirv.IsNan %{{.*}} : vector<4xf32>
+ %3 = math.isnan %v : vector<4xf32>
+
+ // CHECK: spirv.IsInf %{{.*}} : f32
+ %4 = math.isinf %x : f32
+ // CHECK: spirv.IsInf %{{.*}} : vector<4xf32>
+ %5 = math.isinf %v : vector<4xf32>
+
+ return
+ }
+
+}
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index 8d720ce..580b09d 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -580,30 +580,6 @@ func.func @elect_one_leader_sync() {
// -----
-// CHECK-LABEL: @stmatrix(
-// CHECK-SAME: %[[arg0:[a-zA-Z0-9_]+]]: !llvm.ptr<3>,
-// CHECK-SAME: %[[arg1:[a-zA-Z0-9_]+]]: i32,
-// CHECK-SAME: %[[arg2:[a-zA-Z0-9_]+]]: i32,
-// CHECK-SAME: %[[arg3:[a-zA-Z0-9_]+]]: i32,
-// CHECK-SAME: %[[arg4:[a-zA-Z0-9_]+]]: i32)
-llvm.func @stmatrix(%arg0 : !llvm.ptr<3>, %m1 : i32, %m2 : i32, %m3 : i32, %m4 : i32) {
-// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "stmatrix.sync.aligned.x1.m8n8.shared.b16 [$0], {$1};", "r,r" %[[arg0]], %[[arg1]] : (!llvm.ptr<3>, i32) -> ()
-// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "stmatrix.sync.aligned.x2.m8n8.shared.b16 [$0], {$1, $2};", "r,r,r" %[[arg0]], %[[arg1]], %[[arg2]] : (!llvm.ptr<3>, i32, i32) -> ()
-// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "stmatrix.sync.aligned.x4.m8n8.shared.b16 [$0], {$1, $2, $3, $4};", "r,r,r,r,r" %[[arg0]], %[[arg1]], %[[arg2]], %[[arg3]], %[[arg4]] : (!llvm.ptr<3>, i32, i32, i32, i32) -> ()
-// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "stmatrix.sync.aligned.x1.trans.m8n8.shared.b16 [$0], {$1};", "r,r" %[[arg0]], %[[arg1]] : (!llvm.ptr<3>, i32) -> ()
-// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "stmatrix.sync.aligned.x2.trans.m8n8.shared.b16 [$0], {$1, $2};", "r,r,r" %[[arg0]], %[[arg1]], %[[arg2]] : (!llvm.ptr<3>, i32, i32) -> ()
-// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "stmatrix.sync.aligned.x4.trans.m8n8.shared.b16 [$0], {$1, $2, $3, $4};", "r,r,r,r,r" %[[arg0]], %[[arg1]], %[[arg2]], %[[arg3]], %[[arg4]] : (!llvm.ptr<3>, i32, i32, i32, i32) -> ()
- nvvm.stmatrix %arg0, %m1 {layout = #nvvm.mma_layout<row>} : !llvm.ptr<3>, i32
- nvvm.stmatrix %arg0, %m1, %m2 {layout = #nvvm.mma_layout<row>} : !llvm.ptr<3>, i32, i32
- nvvm.stmatrix %arg0, %m1, %m2, %m3, %m4 {layout = #nvvm.mma_layout<row>} : !llvm.ptr<3>, i32, i32, i32, i32
- nvvm.stmatrix %arg0, %m1 {layout = #nvvm.mma_layout<col>} : !llvm.ptr<3>, i32
- nvvm.stmatrix %arg0, %m1, %m2 {layout = #nvvm.mma_layout<col>} : !llvm.ptr<3>, i32, i32
- nvvm.stmatrix %arg0, %m1, %m2, %m3, %m4 {layout = #nvvm.mma_layout<col>} : !llvm.ptr<3>, i32, i32, i32, i32
- llvm.return
-}
-
-// -----
-
// CHECK-LABEL: @init_mbarrier_arrive_expect_tx
llvm.func @init_mbarrier_arrive_expect_tx(%desc : !llvm.ptr, %pred : i1) {
//CHECK: llvm.inline_asm has_side_effects asm_dialect = att "prefetch.tensormap [$0];", "l"
diff --git a/mlir/test/Dialect/Async/canonicalize.mlir b/mlir/test/Dialect/Async/canonicalize.mlir
new file mode 100644
index 0000000..1a74eaa
--- /dev/null
+++ b/mlir/test/Dialect/Async/canonicalize.mlir
@@ -0,0 +1,10 @@
+// RUN: mlir-opt %s -split-input-file -canonicalize | FileCheck %s
+
+// CHECK-NOT: async.execute
+
+func.func @empty_execute() {
+ %token = async.execute {
+ async.yield
+ }
+ return
+}
diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
index a00c798..5f42938 100644
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -1076,6 +1076,44 @@ func.func @drop_known_unit_constant_low_high(%arg0: tensor<1x383x128xf32>) -> te
// -----
+func.func @drop_unit_dim_mixed_static_dynamic(%arg0: tensor<1x?xf32>) -> tensor<1x16xf32> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %cst = arith.constant 0.000000e+00 : f32
+ %padded = tensor.pad %arg0 low[%c0, %c1] high[%c0, %c0] {
+ ^bb0(%arg1: index, %arg2: index):
+ tensor.yield %cst : f32
+ } : tensor<1x?xf32> to tensor<1x16xf32>
+ return %padded : tensor<1x16xf32>
+}
+// CHECK-LABEL: func @drop_unit_dim_mixed_static_dynamic
+// CHECK: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[COLLAPSE:.+]] = tensor.collapse_shape %[[ARGS:.*]] : tensor<1x?xf32> into tensor<?xf32>
+// CHECK: %[[PADDED:.*]] = tensor.pad %[[COLLAPSE]] low[1] high[0] {
+// CHECK: ^bb0(%[[IDX:.*]]: index):
+// CHECK: tensor.yield %[[CST]] : f32
+// CHECK: } : tensor<?xf32> to tensor<16xf32>
+// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[PADDED]] {{\[\[}}0, 1]] output_shape [1, 16] : tensor<16xf32> into tensor<1x16xf32>
+// CHECK: return %[[EXPANDED]] : tensor<1x16xf32>
+
+// -----
+
+#map = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1 + d4, d2 + d5, d6)>
+#map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d3)>
+#map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3)>
+module {
+ func.func @drop_unit_dim_corresponding_to_dynamic_dim(%arg0: tensor<1x?x?x1xf32>, %arg1: index) -> tensor<?x1x61x1xf32> {
+ %cst = arith.constant dense<1.000000e+00> : tensor<1x1x1x1xf32>
+ %0 = tensor.empty(%arg1) : tensor<?x1x61x1xf32>
+ %1 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction"]} ins(%arg0, %cst : tensor<1x?x?x1xf32>, tensor<1x1x1x1xf32>) outs(%0 : tensor<?x1x61x1xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %2 = arith.mulf %in, %in_0 : f32
+ %3 = arith.addf %out, %2 : f32
+ linalg.yield %3 : f32
+ } -> tensor<?x1x61x1xf32>
+ return %1 : tensor<?x1x61x1xf32>
+ }
+}
// CHECK: #[[$MAP1:.+]] = affine_map<(d0) -> (0, d0)>
// CHECK: #[[$MAP2:.+]] = affine_map<(d0) -> ()>
@@ -1097,23 +1135,6 @@ func.func @drop_known_unit_constant_low_high(%arg0: tensor<1x383x128xf32>) -> te
// CHECK: return %[[VAL_14]] : tensor<?x1x61x1xf32>
// CHECK: }
-#map = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1 + d4, d2 + d5, d6)>
-#map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d3)>
-#map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3)>
-module {
- func.func @drop_unit_dim_corresponding_to_dynamic_dim(%arg0: tensor<1x?x?x1xf32>, %arg1: index) -> tensor<?x1x61x1xf32> {
- %cst = arith.constant dense<1.000000e+00> : tensor<1x1x1x1xf32>
- %0 = tensor.empty(%arg1) : tensor<?x1x61x1xf32>
- %1 = linalg.generic {indexing_maps = [#map, #map1, #map2], iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction"]} ins(%arg0, %cst : tensor<1x?x?x1xf32>, tensor<1x1x1x1xf32>) outs(%0 : tensor<?x1x61x1xf32>) {
- ^bb0(%in: f32, %in_0: f32, %out: f32):
- %2 = arith.mulf %in, %in_0 : f32
- %3 = arith.addf %out, %2 : f32
- linalg.yield %3 : f32
- } -> tensor<?x1x61x1xf32>
- return %1 : tensor<?x1x61x1xf32>
- }
-}
-
// -----
func.func @no_fold_empty_tensor_dim_out_of_bounds(%arg0: tensor<1x?x10xf32>) -> tensor<1x?xf32> {
diff --git a/mlir/test/Dialect/SCF/canonicalize.mlir b/mlir/test/Dialect/SCF/canonicalize.mlir
index 12d30e17..308cf150 100644
--- a/mlir/test/Dialect/SCF/canonicalize.mlir
+++ b/mlir/test/Dialect/SCF/canonicalize.mlir
@@ -1440,8 +1440,8 @@ func.func @propagate_into_execute_region() {
// -----
-// CHECK-LABEL: func @execute_region_elim
-func.func @execute_region_elim() {
+// CHECK-LABEL: func @execute_region_inline
+func.func @execute_region_inline() {
affine.for %i = 0 to 100 {
"test.foo"() : () -> ()
%v = scf.execute_region -> i64 {
@@ -1461,8 +1461,30 @@ func.func @execute_region_elim() {
// -----
-// CHECK-LABEL: func @func_execute_region_elim
-func.func @func_execute_region_elim() {
+// CHECK-LABEL: func @execute_region_no_inline
+func.func @execute_region_no_inline() {
+ affine.for %i = 0 to 100 {
+ "test.foo"() : () -> ()
+ %v = scf.execute_region -> i64 no_inline {
+ %x = "test.val"() : () -> i64
+ scf.yield %x : i64
+ }
+ "test.bar"(%v) : (i64) -> ()
+ }
+ return
+}
+
+// CHECK-NEXT: affine.for %arg0 = 0 to 100 {
+// CHECK-NEXT: "test.foo"() : () -> ()
+// CHECK-NEXT: scf.execute_region
+// CHECK-NEXT: %[[VAL:.*]] = "test.val"() : () -> i64
+// CHECK-NEXT: scf.yield %[[VAL]] : i64
+// CHECK-NEXT: }
+
+// -----
+
+// CHECK-LABEL: func @func_execute_region_inline
+func.func @func_execute_region_inline() {
"test.foo"() : () -> ()
%v = scf.execute_region -> i64 {
%c = "test.cmp"() : () -> i1
@@ -1496,8 +1518,8 @@ func.func @func_execute_region_elim() {
// -----
-// CHECK-LABEL: func @func_execute_region_elim_multi_yield
-func.func @func_execute_region_elim_multi_yield() {
+// CHECK-LABEL: func @func_execute_region_inline_multi_yield
+func.func @func_execute_region_inline_multi_yield() {
"test.foo"() : () -> ()
%v = scf.execute_region -> i64 {
%c = "test.cmp"() : () -> i1
diff --git a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
index d6c3464..58b8288 100644
--- a/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
@@ -33,6 +33,24 @@ func.func @inotequal_vector(%arg0: vector<4xi32>, %arg1: vector<4xi32>) -> vecto
// -----
//===----------------------------------------------------------------------===//
+// spirv.IsFinite
+//===----------------------------------------------------------------------===//
+
+func.func @isfinite_scalar(%arg0: f32) -> i1 {
+ // CHECK: spirv.IsFinite {{.*}} : f32
+ %0 = spirv.IsFinite %arg0 : f32
+ return %0 : i1
+}
+
+func.func @isfinite_vector(%arg0: vector<2xf32>) -> vector<2xi1> {
+ // CHECK: spirv.IsFinite {{.*}} : vector<2xf32>
+ %0 = spirv.IsFinite %arg0 : vector<2xf32>
+ return %0 : vector<2xi1>
+}
+
+// -----
+
+//===----------------------------------------------------------------------===//
// spirv.IsInf
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
index 6b55442..5150ee3 100644
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -241,6 +241,26 @@ func.func @clamp_f32_is_noop(%arg0: tensor<4xf32>) -> tensor<4xf32> {
// -----
+// CHECK-LABEL: @clamp_boolean_is_noop
+func.func @clamp_boolean_is_noop(%arg0: tensor<4xi1>) -> tensor<4xi1> {
+ // CHECK: return %arg0
+ // CHECK-NOT: tosa.clamp
+ %0 = tosa.clamp %arg0 {min_val = false, max_val = true} : (tensor<4xi1>) -> tensor<4xi1>
+ return %0 : tensor<4xi1>
+}
+
+// -----
+
+// CHECK-LABEL: @clamp_boolean_dynamic_is_noop
+func.func @clamp_boolean_dynamic_is_noop(%arg0: tensor<?xi1>) -> tensor<?xi1> {
+ // CHECK: return %arg0
+ // CHECK-NOT: tosa.clamp
+ %0 = tosa.clamp %arg0 {min_val = false, max_val = true} : (tensor<?xi1>) -> tensor<?xi1>
+ return %0 : tensor<?xi1>
+}
+
+// -----
+
// CHECK-LABEL: @clamp_int8_is_noop
func.func @clamp_int8_is_noop(%arg0: tensor<4xi8>) -> tensor<4xi8> {
// CHECK: return %arg0
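Note: the new boolean tests follow the same reasoning as the surrounding integer ones: min_val = false and max_val = true cover the entire i1 range, so the clamp cannot change any element and folds to its input. For contrast, a hypothetical clamp whose bounds do not span the full element range is not removable:

  // Bounds narrower than the i8 range, so this clamp must stay.
  %0 = tosa.clamp %arg0 {min_val = 0 : i8, max_val = 100 : i8} : (tensor<4xi8>) -> tensor<4xi8>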
diff --git a/mlir/test/Dialect/Vector/canonicalize.mlir b/mlir/test/Dialect/Vector/canonicalize.mlir
index 9cfebd5..56996b5 100644
--- a/mlir/test/Dialect/Vector/canonicalize.mlir
+++ b/mlir/test/Dialect/Vector/canonicalize.mlir
@@ -1330,11 +1330,11 @@ func.func @fold_consecutive_broadcasts(%a : i32) -> vector<4x16xi32> {
// -----
-// CHECK-LABEL: shape_cast_constant
+// CHECK-LABEL: shape_cast_splat_constant
// CHECK-DAG: %[[CST1:.*]] = arith.constant dense<1> : vector<3x4x2xi32>
// CHECK-DAG: %[[CST0:.*]] = arith.constant dense<2.000000e+00> : vector<20x2xf32>
// CHECK: return %[[CST0]], %[[CST1]] : vector<20x2xf32>, vector<3x4x2xi32>
-func.func @shape_cast_constant() -> (vector<20x2xf32>, vector<3x4x2xi32>) {
+func.func @shape_cast_splat_constant() -> (vector<20x2xf32>, vector<3x4x2xi32>) {
%cst = arith.constant dense<2.000000e+00> : vector<5x4x2xf32>
%cst_1 = arith.constant dense<1> : vector<12x2xi32>
%0 = vector.shape_cast %cst : vector<5x4x2xf32> to vector<20x2xf32>
@@ -1344,6 +1344,36 @@ func.func @shape_cast_constant() -> (vector<20x2xf32>, vector<3x4x2xi32>) {
// -----
+// Test of shape_cast's fold method:
+// shape_cast(constant) -> constant.
+//
+// CHECK-LABEL: @shape_cast_dense_int_constant
+// CHECK: %[[CST:.*]] = arith.constant
+// CHECK-SAME{LITERAL}: dense<[[2, 3, 5], [7, 11, 13]]>
+// CHECK: return %[[CST]] : vector<2x3xi8>
+func.func @shape_cast_dense_int_constant() -> vector<2x3xi8> {
+ %cst = arith.constant dense<[2, 3, 5, 7, 11, 13]> : vector<6xi8>
+ %0 = vector.shape_cast %cst : vector<6xi8> to vector<2x3xi8>
+ return %0 : vector<2x3xi8>
+}
+
+// -----
+
+// Test of shape_cast's fold method:
+// (shape_cast(const_x), const_x) -> (const_x_folded, const_x)
+//
+// CHECK-LABEL: @shape_cast_dense_float_constant
+// CHECK-DAG: %[[CST0:.*]] = {{.*}}1.000000e+00, 2.000000e+00{{.*}} vector<1x2xf32>
+// CHECK-DAG: %[[CST1:.*]] = {{.*}}1.000000e+00, 2.000000e+00{{.*}} vector<2xf32>
+// CHECK: return %[[CST1]], %[[CST0]] : vector<2xf32>, vector<1x2xf32>
+func.func @shape_cast_dense_float_constant() -> (vector<2xf32>, vector<1x2xf32>){
+ %cst = arith.constant dense<[[1.0, 2.0]]> : vector<1x2xf32>
+ %0 = vector.shape_cast %cst : vector<1x2xf32> to vector<2xf32>
+ return %0, %cst : vector<2xf32>, vector<1x2xf32>
+}
+
+// -----
+
// CHECK-LABEL: shape_cast_poison
// CHECK-DAG: %[[CST1:.*]] = ub.poison : vector<3x4x2xi32>
// CHECK-DAG: %[[CST0:.*]] = ub.poison : vector<20x2xf32>
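Note: both new fold tests rely on the same property: a dense constant stores its elements in flattened row-major order, so folding shape_cast(constant) only reinterprets the type while the element list is unchanged. A hedged illustration (not part of the patch):

  // The same six elements, spelled at two different shapes.
  %flat = arith.constant dense<[2, 3, 5, 7, 11, 13]> : vector<6xi8>
  %grid = arith.constant dense<[[2, 3, 5], [7, 11, 13]]> : vector<2x3xi8>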
diff --git a/mlir/test/Dialect/Vector/int-range-interface.mlir b/mlir/test/Dialect/Vector/int-range-interface.mlir
index 2563b48..b2f16bb 100644
--- a/mlir/test/Dialect/Vector/int-range-interface.mlir
+++ b/mlir/test/Dialect/Vector/int-range-interface.mlir
@@ -51,6 +51,15 @@ func.func @vector_shape_cast() -> vector<4x4xindex> {
func.return %2 : vector<4x4xindex>
}
+// CHECK-LABEL: func @vector_transpose
+// CHECK: test.reflect_bounds {smax = 8 : index, smin = 7 : index, umax = 8 : index, umin = 7 : index}
+func.func @vector_transpose() -> vector<2x4xindex> {
+ %0 = test.with_bounds { smax = 8 : index, smin = 7 : index, umax = 8 : index, umin = 7 : index } : vector<4x2xindex>
+ %1 = vector.transpose %0, [1, 0] : vector<4x2xindex> to vector<2x4xindex>
+ %2 = test.reflect_bounds %1 : vector<2x4xindex>
+ func.return %2 : vector<2x4xindex>
+}
+
// CHECK-LABEL: func @vector_extract
// CHECK: test.reflect_bounds {smax = 6 : index, smin = 5 : index, umax = 6 : index, umin = 5 : index}
func.func @vector_extract() -> index {
@@ -99,3 +108,11 @@ func.func @test_vector_extsi() -> vector<2xi32> {
%2 = test.reflect_bounds %1 : vector<2xi32>
func.return %2 : vector<2xi32>
}
+
+// CHECK-LABEL: func @vector_step
+// CHECK: test.reflect_bounds {smax = 7 : index, smin = 0 : index, umax = 7 : index, umin = 0 : index}
+func.func @vector_step() -> vector<8xindex> {
+ %0 = vector.step : vector<8xindex>
+ %1 = test.reflect_bounds %0 : vector<8xindex>
+ func.return %1 : vector<8xindex>
+}
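Note: vector.step materializes the sequence [0, 1, ..., N-1], so its integer range is exactly [0, N-1]; for vector<8xindex> that is the [0, 7] bound reflected above. The same rule at a smaller size:

  %s = vector.step : vector<4xindex>
  // %s holds [0, 1, 2, 3], so smin = umin = 0 and smax = umax = 3.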
diff --git a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir
index 8e167a5..d5e3443 100644
--- a/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-broadcast-lowering-transforms.mlir
@@ -2,7 +2,7 @@
// CHECK-LABEL: func @broadcast_vec1d_from_scalar
// CHECK-SAME: %[[A:.*0]]: f32
-// CHECK: %[[T0:.*]] = vector.splat %[[A]] : vector<2xf32>
+// CHECK: %[[T0:.*]] = vector.broadcast %[[A]] : f32 to vector<2xf32>
// CHECK: return %[[T0]] : vector<2xf32>
func.func @broadcast_vec1d_from_scalar(%arg0: f32) -> vector<2xf32> {
@@ -12,7 +12,7 @@ func.func @broadcast_vec1d_from_scalar(%arg0: f32) -> vector<2xf32> {
// CHECK-LABEL: func @broadcast_vec2d_from_scalar
// CHECK-SAME: %[[A:.*0]]: f32
-// CHECK: %[[T0:.*]] = vector.splat %[[A]] : vector<2x3xf32>
+// CHECK: %[[T0:.*]] = vector.broadcast %[[A]] : f32 to vector<2x3xf32>
// CHECK: return %[[T0]] : vector<2x3xf32>
func.func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
@@ -22,7 +22,7 @@ func.func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
// CHECK-LABEL: func @broadcast_vec3d_from_scalar
// CHECK-SAME: %[[A:.*0]]: f32
-// CHECK: %[[T0:.*]] = vector.splat %[[A]] : vector<2x3x4xf32>
+// CHECK: %[[T0:.*]] = vector.broadcast %[[A]] : f32 to vector<2x3x4xf32>
// CHECK: return %[[T0]] : vector<2x3x4xf32>
func.func @broadcast_vec3d_from_scalar(%arg0: f32) -> vector<2x3x4xf32> {
@@ -87,7 +87,7 @@ func.func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf3
// CHECK-LABEL: func @broadcast_stretch
// CHECK-SAME: %[[A:.*0]]: vector<1xf32>
// CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : f32 from vector<1xf32>
-// CHECK: %[[T1:.*]] = vector.splat %[[T0]] : vector<4xf32>
+// CHECK: %[[T1:.*]] = vector.broadcast %[[T0]] : f32 to vector<4xf32>
// CHECK: return %[[T1]] : vector<4xf32>
func.func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> {
@@ -113,16 +113,16 @@ func.func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32>
// CHECK-SAME: %[[A:.*0]]: vector<4x1xf32>
// CHECK: %[[U0:.*]] = ub.poison : vector<4x3xf32>
// CHECK: %[[T0:.*]] = vector.extract %[[A]][0, 0] : f32 from vector<4x1xf32>
-// CHECK: %[[T2:.*]] = vector.splat %[[T0]] : vector<3xf32>
+// CHECK: %[[T2:.*]] = vector.broadcast %[[T0]] : f32 to vector<3xf32>
// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[U0]] [0] : vector<3xf32> into vector<4x3xf32>
// CHECK: %[[T4:.*]] = vector.extract %[[A]][1, 0] : f32 from vector<4x1xf32>
-// CHECK: %[[T6:.*]] = vector.splat %[[T4]] : vector<3xf32>
+// CHECK: %[[T6:.*]] = vector.broadcast %[[T4]] : f32 to vector<3xf32>
// CHECK: %[[T7:.*]] = vector.insert %[[T6]], %[[T3]] [1] : vector<3xf32> into vector<4x3xf32>
// CHECK: %[[T8:.*]] = vector.extract %[[A]][2, 0] : f32 from vector<4x1xf32>
-// CHECK: %[[T10:.*]] = vector.splat %[[T8]] : vector<3xf32>
+// CHECK: %[[T10:.*]] = vector.broadcast %[[T8]] : f32 to vector<3xf32>
// CHECK: %[[T11:.*]] = vector.insert %[[T10]], %[[T7]] [2] : vector<3xf32> into vector<4x3xf32>
// CHECK: %[[T12:.*]] = vector.extract %[[A]][3, 0] : f32 from vector<4x1xf32>
-// CHECK: %[[T14:.*]] = vector.splat %[[T12]] : vector<3xf32>
+// CHECK: %[[T14:.*]] = vector.broadcast %[[T12]] : f32 to vector<3xf32>
// CHECK: %[[T15:.*]] = vector.insert %[[T14]], %[[T11]] [3] : vector<3xf32> into vector<4x3xf32>
// CHECK: return %[[T15]] : vector<4x3xf32>
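Note: in this file and the outerproduct file below, the expected IR switches from vector.splat to vector.broadcast; for a scalar source the two forms compute the same value, and the checks appear to be migrating toward the broadcast spelling. A hedged equivalence sketch (hypothetical IR):

  %a = vector.splat %f : vector<4xf32>
  %b = vector.broadcast %f : f32 to vector<4xf32>
  // %a and %b are identical splat vectors.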
diff --git a/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir b/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir
index 059d955..5a8125e 100644
--- a/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir
+++ b/mlir/test/Dialect/Vector/vector-outerproduct-lowering-transforms.mlir
@@ -5,11 +5,11 @@
// CHECK-SAME: %[[B:.*1]]: vector<3xf32>
// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<2x3xf32>
// CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : f32 from vector<2xf32>
-// CHECK: %[[T1:.*]] = vector.splat %[[T0]] : vector<3xf32>
+// CHECK: %[[T1:.*]] = vector.broadcast %[[T0]] : f32 to vector<3xf32>
// CHECK: %[[T2:.*]] = arith.mulf %[[T1]], %[[B]] : vector<3xf32>
// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C0]] [0] : vector<3xf32> into vector<2x3xf32>
// CHECK: %[[T4:.*]] = vector.extract %[[A]][1] : f32 from vector<2xf32>
-// CHECK: %[[T5:.*]] = vector.splat %[[T4]] : vector<3xf32>
+// CHECK: %[[T5:.*]] = vector.broadcast %[[T4]] : f32 to vector<3xf32>
// CHECK: %[[T6:.*]] = arith.mulf %[[T5]], %[[B]] : vector<3xf32>
// CHECK: %[[T7:.*]] = vector.insert %[[T6]], %[[T3]] [1] : vector<3xf32> into vector<2x3xf32>
// CHECK: return %[[T7]] : vector<2x3xf32>
@@ -26,12 +26,12 @@ func.func @outerproduct_noacc(%arg0: vector<2xf32>,
// CHECK-SAME: %[[C:.*2]]: vector<2x3xf32>
// CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : vector<2x3xf32>
// CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : f32 from vector<2xf32>
-// CHECK: %[[T1:.*]] = vector.splat %[[T0]] : vector<3xf32>
+// CHECK: %[[T1:.*]] = vector.broadcast %[[T0]] : f32 to vector<3xf32>
// CHECK: %[[T2:.*]] = vector.extract %[[C]][0] : vector<3xf32> from vector<2x3xf32>
// CHECK: %[[T3:.*]] = vector.fma %[[T1]], %[[B]], %[[T2]] : vector<3xf32>
// CHECK: %[[T4:.*]] = vector.insert %[[T3]], %[[C0]] [0] : vector<3xf32> into vector<2x3xf32>
// CHECK: %[[T5:.*]] = vector.extract %[[A]][1] : f32 from vector<2xf32>
-// CHECK: %[[T6:.*]] = vector.splat %[[T5]] : vector<3xf32>
+// CHECK: %[[T6:.*]] = vector.broadcast %[[T5]] : f32 to vector<3xf32>
// CHECK: %[[T7:.*]] = vector.extract %[[C]][1] : vector<3xf32> from vector<2x3xf32>
// CHECK: %[[T8:.*]] = vector.fma %[[T6]], %[[B]], %[[T7]] : vector<3xf32>
// CHECK: %[[T9:.*]] = vector.insert %[[T8]], %[[T4]] [1] : vector<3xf32> into vector<2x3xf32>
@@ -49,11 +49,11 @@ func.func @outerproduct_acc(%arg0: vector<2xf32>,
// CHECK-SAME: %[[B:.*1]]: vector<3xi32>
// CHECK: %[[C0:.*]] = arith.constant dense<0> : vector<2x3xi32>
// CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : i32 from vector<2xi32>
-// CHECK: %[[T1:.*]] = vector.splat %[[T0]] : vector<3xi32>
+// CHECK: %[[T1:.*]] = vector.broadcast %[[T0]] : i32 to vector<3xi32>
// CHECK: %[[T2:.*]] = arith.muli %[[T1]], %[[B]] : vector<3xi32>
// CHECK: %[[T3:.*]] = vector.insert %[[T2]], %[[C0]] [0] : vector<3xi32> into vector<2x3xi32>
// CHECK: %[[T4:.*]] = vector.extract %[[A]][1] : i32 from vector<2xi32>
-// CHECK: %[[T5:.*]] = vector.splat %[[T4]] : vector<3xi32>
+// CHECK: %[[T5:.*]] = vector.broadcast %[[T4]] : i32 to vector<3xi32>
// CHECK: %[[T6:.*]] = arith.muli %[[T5]], %[[B]] : vector<3xi32>
// CHECK: %[[T7:.*]] = vector.insert %[[T6]], %[[T3]] [1] : vector<3xi32> into vector<2x3xi32>
// CHECK: return %[[T7]] : vector<2x3xi32>
@@ -69,13 +69,13 @@ func.func @outerproduct_noacc_int(%arg0: vector<2xi32>,
// CHECK-SAME: %[[C:.*2]]: vector<2x3xi32>
// CHECK: %[[C0:.*]] = arith.constant dense<0> : vector<2x3xi32>
// CHECK: %[[T0:.*]] = vector.extract %[[A]][0] : i32 from vector<2xi32>
-// CHECK: %[[T1:.*]] = vector.splat %[[T0]] : vector<3xi32>
+// CHECK: %[[T1:.*]] = vector.broadcast %[[T0]] : i32 to vector<3xi32>
// CHECK: %[[T2:.*]] = vector.extract %[[C]][0] : vector<3xi32> from vector<2x3xi32>
// CHECK: %[[T3:.*]] = arith.muli %[[T1]], %[[B]] : vector<3xi32>
// CHECK: %[[T4:.*]] = arith.addi %[[T3]], %[[T2]] : vector<3xi32>
// CHECK: %[[T5:.*]] = vector.insert %[[T4]], %[[C0]] [0] : vector<3xi32> into vector<2x3xi32>
// CHECK: %[[T6:.*]] = vector.extract %[[A]][1] : i32 from vector<2xi32>
-// CHECK: %[[T7:.*]] = vector.splat %[[T6]] : vector<3xi32>
+// CHECK: %[[T7:.*]] = vector.broadcast %[[T6]] : i32 to vector<3xi32>
// CHECK: %[[T8:.*]] = vector.extract %[[C]][1] : vector<3xi32> from vector<2x3xi32>
// CHECK: %[[T9:.*]] = arith.muli %[[T7]], %[[B]] : vector<3xi32>
// CHECK: %[[T10:.*]] = arith.addi %[[T9]], %[[T8]] : vector<3xi32>
@@ -91,7 +91,7 @@ func.func @outerproduct_acc_int(%arg0: vector<2xi32>,
// CHECK-LABEL: func @axpy_fp(
// CHECK-SAME: %[[A:.*0]]: vector<16xf32>,
// CHECK-SAME: %[[B:.*1]]: f32)
-// CHECK: %[[T0:.*]] = vector.splat %[[B]] : vector<16xf32>
+// CHECK: %[[T0:.*]] = vector.broadcast %[[B]] : f32 to vector<16xf32>
// CHECK: %[[T1:.*]] = arith.mulf %[[A]], %[[T0]] : vector<16xf32>
// CHECK: return %[[T1]] : vector<16xf32>
func.func @axpy_fp(%arg0: vector<16xf32>, %arg1: f32) -> vector<16xf32> {
@@ -103,7 +103,7 @@ func.func @axpy_fp(%arg0: vector<16xf32>, %arg1: f32) -> vector<16xf32> {
// CHECK-SAME: %[[A:.*0]]: vector<16xf32>,
// CHECK-SAME: %[[B:.*1]]: f32,
// CHECK-SAME: %[[C:.*2]]: vector<16xf32>)
-// CHECK: %[[T0:.*]] = vector.splat %[[B]] : vector<16xf32>
+// CHECK: %[[T0:.*]] = vector.broadcast %[[B]] : f32 to vector<16xf32>
// CHECK: %[[T1:.*]] = vector.fma %[[A]], %[[T0]], %[[C]] : vector<16xf32>
// CHECK: return %[[T1]] : vector<16xf32>
func.func @axpy_fp_add(%arg0: vector<16xf32>, %arg1: f32, %arg2 : vector<16xf32>) -> vector<16xf32> {
@@ -114,7 +114,7 @@ func.func @axpy_fp_add(%arg0: vector<16xf32>, %arg1: f32, %arg2 : vector<16xf32>
// CHECK-LABEL: func @axpy_int(
// CHECK-SAME: %[[A:.*0]]: vector<16xi32>,
// CHECK-SAME: %[[B:.*1]]: i32)
-// CHECK: %[[T0:.*]] = vector.splat %[[B]] : vector<16xi32>
+// CHECK: %[[T0:.*]] = vector.broadcast %[[B]] : i32 to vector<16xi32>
// CHECK: %[[T1:.*]] = arith.muli %[[A]], %[[T0]] : vector<16xi32>
// CHECK: return %[[T1]] : vector<16xi32>
func.func @axpy_int(%arg0: vector<16xi32>, %arg1: i32) -> vector<16xi32> {
@@ -126,7 +126,7 @@ func.func @axpy_int(%arg0: vector<16xi32>, %arg1: i32) -> vector<16xi32> {
// CHECK-SAME: %[[A:.*0]]: vector<16xi32>,
// CHECK-SAME: %[[B:.*1]]: i32,
// CHECK-SAME: %[[C:.*2]]: vector<16xi32>)
-// CHECK: %[[T0:.*]] = vector.splat %[[B]] : vector<16xi32>
+// CHECK: %[[T0:.*]] = vector.broadcast %[[B]] : i32 to vector<16xi32>
// CHECK: %[[T1:.*]] = arith.muli %[[A]], %[[T0]] : vector<16xi32>
// CHECK: %[[T2:.*]] = arith.addi %[[T1]], %[[C]] : vector<16xi32>
// CHECK: return %[[T2]] : vector<16xi32>
diff --git a/mlir/test/Dialect/XeGPU/invalid.mlir b/mlir/test/Dialect/XeGPU/invalid.mlir
index 0160bfe..dff3ffa 100644
--- a/mlir/test/Dialect/XeGPU/invalid.mlir
+++ b/mlir/test/Dialect/XeGPU/invalid.mlir
@@ -385,6 +385,74 @@ func.func @load_gather_vc_3(%src: ui64) {
}
// -----
+func.func @prefetch_offset_wi_1(%src: memref<4x4xf32>) {
+ %offsets = arith.constant dense<[0]> : vector<1xindex>
+ // expected-error@+1 {{Expecting the source is a 1D memref or pointer}}
+ xegpu.prefetch %src[%offsets]: memref<4x4xf32>, vector<1xindex>
+ return
+}
+
+// -----
+func.func @load_gather_offset_sg(%src: memref<?xf16>) {
+ %offsets = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
+ %mask = arith.constant dense<1>: vector<8xi1>
+ // expected-error@+1 {{Mask should match value except the chunk size dim}}
+ %2 = xegpu.load %src[%offsets], %mask
+ : memref<?xf16>, vector<4xindex>, vector<8xi1>
+ -> vector<4x2xf16>
+ return
+}
+
+// -----
+func.func @load_gather_offset_wi(%src: ui64) {
+ %mask = arith.constant dense<1>: vector<1xi1>
+ %offsets = arith.constant dense<[0]> : vector<1xindex>
+ // expected-error@+1 {{value elements must match chunk size}}
+ %2 = xegpu.load %src[%offsets], %mask <{chunk_size = 2}> : ui64, vector<1xindex>, vector<1xi1> -> vector<4xf32>
+ return
+}
+
+// -----
+func.func @store_scatter_offset_wi_1(%src: memref<?xf16>) {
+ %val = arith.constant dense<2.9>: vector<4xf16>
+ %offsets = arith.constant dense<[0]> : vector<1xindex>
+ %mask = arith.constant dense<1>: vector<1xi1>
+ // expected-error@+1 {{value elements must match chunk size}}
+ xegpu.store %val, %src[%offsets], %mask
+ : vector<4xf16>, memref<?xf16>, vector<1xindex>, vector<1xi1>
+ return
+}
+
+// -----
+func.func @store_scatter_offset_wi_2(%src: memref<4x4xf16>) {
+ %val = arith.constant dense<2.9>: vector<4xf16>
+ %offsets = arith.constant dense<[0]> : vector<1xindex>
+ %mask = arith.constant dense<1>: vector<1xi1>
+ // expected-error@+1 {{Expecting the dest is a 1D memref or pointer}}
+ xegpu.store %val, %src[%offsets], %mask
+ : vector<4xf16>, memref<4x4xf16>, vector<1xindex>, vector<1xi1>
+ return
+}
+
+// -----
+func.func @load_gather_offset_wi_2(%src: ui64) {
+ %mask = arith.constant dense<1>: vector<1xi1>
+ %offsets = arith.constant dense<[0]> : vector<1xindex>
+ // expected-error@+1 {{value elements must match chunk size}}
+ %2 = xegpu.load %src[%offsets], %mask <{chunk_size = 2}> : ui64, vector<1xindex>, vector<1xi1> -> vector<4xf16>
+ return
+}
+
+// -----
+func.func @load_gather_offset_wi_1(%src: memref<4x4xf32>) {
+ %mask = arith.constant dense<1>: vector<1xi1>
+ %offsets = arith.constant dense<[0]> : vector<1xindex>
+ // expected-error@+1 {{Expecting the source is a 1D memref or pointer}}
+ %2 = xegpu.load %src[%offsets], %mask <{chunk_size = 2}> : memref<4x4xf32>, vector<1xindex>, vector<1xi1> -> vector<2xf32>
+ return
+}
+
+// -----
func.func @store_scatter_vc_1(%src: memref<24x32xf32>) {
%0 = arith.constant dense<1>: vector<4xi1>
%1 = arith.constant dense<2.9>: vector<4x2xf32>
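Note: taken together, these negative tests pin down the scatter/gather rules: the source or dest must be a 1D memref or a pointer, the mask shape must match the value shape minus the chunk dimension, and with chunk_size = N each active lane transfers N consecutive elements. A well-formed counterpart, mirroring the valid ops.mlir test further below:

  %v = xegpu.load %src[%offsets], %mask <{chunk_size = 2}>
      : memref<?xf16>, vector<4xindex>, vector<4xi1> -> vector<4x2xf16>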
diff --git a/mlir/test/Dialect/XeGPU/ops.mlir b/mlir/test/Dialect/XeGPU/ops.mlir
index 3ebb1b969a..6be2371 100644
--- a/mlir/test/Dialect/XeGPU/ops.mlir
+++ b/mlir/test/Dialect/XeGPU/ops.mlir
@@ -521,6 +521,16 @@ gpu.func @subgroup_load_4(%src: ui64) {
gpu.return
}
+// CHECK: gpu.func @subgroup_load_offset_1(%arg0: memref<?xf16>) {
+gpu.func @subgroup_load_offset_1(%src: memref<?xf16>) {
+ %offset = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
+ %mask = arith.constant dense<1>: vector<4xi1>
+ //CHECK: %[[R1:.*]] = xegpu.load %arg0[%cst], %cst_0 <{chunk_size = 2 : i64, l1_hint = #xegpu.cache_hint<cached>}> : memref<?xf16>, vector<4xindex>, vector<4xi1> -> vector<4x2xf16>
+ %val = xegpu.load %src[%offset], %mask <{chunk_size=2, l1_hint = #xegpu.cache_hint<cached>}>
+ : memref<?xf16>, vector<4xindex>, vector<4xi1> -> vector<4x2xf16>
+ gpu.return
+}
+
// CHECK: gpu.func @subgroup_store(%[[arg0:.*]]: ui64) {
gpu.func @subgroup_store(%src: ui64) {
//CHECK: %[[cst:.*]] = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
@@ -626,6 +636,17 @@ gpu.func @subgroup_store_4(%src: ui64) {
gpu.return
}
+// CHECK: gpu.func @subgroup_store_offset_1(%arg0: memref<?xf16>) {
+gpu.func @subgroup_store_offset_1(%dest: memref<?xf16>) {
+ %val = arith.constant dense<2.9>: vector<4x2xf16>
+ %offset = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
+ %mask = arith.constant dense<1>: vector<4xi1>
+ //CHECK: xegpu.store %[[R0:.*]], %arg0[%cst_0], %cst_1 <{chunk_size = 2 : i64, l1_hint = #xegpu.cache_hint<cached>}> : vector<4x2xf16>, memref<?xf16>, vector<4xindex>, vector<4xi1>
+ xegpu.store %val, %dest[%offset], %mask <{chunk_size=2, l1_hint = #xegpu.cache_hint<cached>}>
+ : vector<4x2xf16>, memref<?xf16>, vector<4xindex>, vector<4xi1>
+ gpu.return
+}
+
// CHECK: gpu.func @prefetch(%[[arg0:.*]]: ui64) {
gpu.func @prefetch(%src: ui64) {
//CHECK: %[[cst:.*]] = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
@@ -637,6 +658,14 @@ gpu.func @prefetch(%src: ui64) {
gpu.return
}
+// CHECK: gpu.func @prefetch_offset(%[[arg0:.*]]: ui64) {
+gpu.func @prefetch_offset(%src: ui64) {
+ //CHECK: %[[cst:.*]] = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
+ %0 = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
+ // CHECK: xegpu.prefetch %[[arg0]][%cst] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : ui64, vector<4xindex>
+ xegpu.prefetch %src[%0] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}>: ui64, vector<4xindex>
+ gpu.return
+}
// CHECK: gpu.func @create_update_tdesc(%[[arg0:.*]]: ui64) {
gpu.func @create_update_tdesc(%src: ui64) {
diff --git a/mlir/test/IR/top-level.mlir b/mlir/test/IR/top-level.mlir
index e0adb4d82..5389691 100644
--- a/mlir/test/IR/top-level.mlir
+++ b/mlir/test/IR/top-level.mlir
@@ -6,10 +6,10 @@ func.func private @foo()
// -----
-// expected-error@-9 {{source must contain a single top-level operation, found: 2}}
+// expected-error@-2 {{source must contain a single top-level operation, found: 2}}
func.func private @bar()
func.func private @baz()
// -----
-// expected-error@-15 {{source must contain a single top-level operation, found: 0}}
+// expected-error@-2 {{source must contain a single top-level operation, found: 0}}
diff --git a/mlir/test/Target/LLVMIR/Import/module-asm.ll b/mlir/test/Target/LLVMIR/Import/module-asm.ll
new file mode 100644
index 0000000..38f6ea4
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/Import/module-asm.ll
@@ -0,0 +1,5 @@
+; RUN: mlir-translate -import-llvm %s | FileCheck %s
+; CHECK: llvm.module_asm = ["foo", "bar"]
+
+module asm "foo"
+module asm "bar"
diff --git a/mlir/test/Target/LLVMIR/invalid-module.mlir b/mlir/test/Target/LLVMIR/invalid-module.mlir
index 7fd5f26..5ed6244 100644
--- a/mlir/test/Target/LLVMIR/invalid-module.mlir
+++ b/mlir/test/Target/LLVMIR/invalid-module.mlir
@@ -1,6 +1,16 @@
-// RUN: mlir-translate -verify-diagnostics -mlir-to-llvmir --no-implicit-module %s
+// RUN: mlir-translate -verify-diagnostics -mlir-to-llvmir --no-implicit-module -split-input-file %s
// expected-error@below {{'llvm.func' op can not be translated to an LLVMIR module}}
llvm.func @foo() {
llvm.return
}
+
+// -----
+
+// expected-error@below {{expected an array attribute for a module level asm}}
+module attributes {llvm.module_asm = "foo"} {}
+
+// -----
+
+// expected-error@below {{expected a string attribute for each entry of a module level asm}}
+module attributes {llvm.module_asm = [42]} {}
diff --git a/mlir/test/Target/LLVMIR/module-asm.mlir b/mlir/test/Target/LLVMIR/module-asm.mlir
new file mode 100644
index 0000000..2afb37c
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/module-asm.mlir
@@ -0,0 +1,6 @@
+// RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
+
+module attributes {llvm.module_asm = ["foo", "bar"]} {}
+
+// CHECK: module asm "foo"
+// CHECK: module asm "bar"
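Note: each string in the llvm.module_asm array becomes one module-level asm line in the emitted LLVM IR, and the importer test (module-asm.ll above) gathers the lines back into the same array attribute, so the property round-trips. A hypothetical use:

  module attributes {llvm.module_asm = [".globl my_sym"]} {}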
diff --git a/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir b/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
index 8c4f0aa..85478cc 100644
--- a/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir-invalid.mlir
@@ -312,3 +312,42 @@ llvm.func @nvvm_prefetch_uniform_with_invalid_addr_space(%global_ptr: !llvm.ptr<
nvvm.prefetch level = L1 uniform, %global_ptr : !llvm.ptr<1>
llvm.return
}
+
+// -----
+
+llvm.func @st_matrix(%arg0: !llvm.ptr<3>, %r1: i32, %r2: i32, %r3: i32, %r4: i32) {
+ // expected-error@+1 {{'nvvm.stmatrix' op expected num attribute to be 1, 2 or 4}}
+ nvvm.stmatrix %arg0, %r1, %r2, %r3 {layout = #nvvm.mma_layout<row>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32, i32, i32
+ llvm.return
+}
+
+// -----
+
+llvm.func @st_matrix(%arg0: !llvm.ptr<3>, %r1: i32, %r2: i32, %r3: i32, %r4: i32) {
+ // expected-error@+1 {{'nvvm.stmatrix' op expected shape to be 8x8 or 16x8}}
+ nvvm.stmatrix %arg0, %r1 {layout = #nvvm.mma_layout<row>, shape = #nvvm.ld_st_matrix_shape<m = 16, n = 16>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32
+ llvm.return
+}
+
+// -----
+
+llvm.func @st_matrix(%arg0: !llvm.ptr<3>, %r1: i32, %r2: i32, %r3: i32, %r4: i32) {
+ // expected-error@+1 {{'nvvm.stmatrix' op expected element type to be B16 for 8x8 matrix}}
+ nvvm.stmatrix %arg0, %r1 {layout = #nvvm.mma_layout<row>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b8>} : !llvm.ptr<3>, i32
+ llvm.return
+}
+// -----
+
+llvm.func @st_matrix(%arg0: !llvm.ptr<3>, %r1: i32, %r2: i32, %r3: i32, %r4: i32) {
+ // expected-error@+1 {{'nvvm.stmatrix' op expected element type to be B8 for 16x8 matrix}}
+ nvvm.stmatrix %arg0, %r1 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 16, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32
+ llvm.return
+}
+
+// -----
+
+llvm.func @st_matrix(%arg0: !llvm.ptr<3>, %r1: i32, %r2: i32, %r3: i32, %r4: i32) {
+ // expected-error@+1 {{'nvvm.stmatrix' op expected layout to be col for 16x8 matrix}}
+ nvvm.stmatrix %arg0, %r1 {layout = #nvvm.mma_layout<row>, shape = #nvvm.ld_st_matrix_shape<m = 16, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b8>} : !llvm.ptr<3>, i32
+ llvm.return
+}
diff --git a/mlir/test/Target/LLVMIR/nvvmir.mlir b/mlir/test/Target/LLVMIR/nvvmir.mlir
index f86a041..5c2cfa4 100644
--- a/mlir/test/Target/LLVMIR/nvvmir.mlir
+++ b/mlir/test/Target/LLVMIR/nvvmir.mlir
@@ -573,6 +573,29 @@ llvm.func @ld_matrix(%arg0: !llvm.ptr<3>) {
llvm.return
}
+// CHECK-LABEL: @st_matrix
+llvm.func @st_matrix(%arg0: !llvm.ptr<3>, %r1: i32, %r2: i32, %r3: i32, %r4: i32) {
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m8n8.x1.b16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1 {layout = #nvvm.mma_layout<row>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m8n8.x1.trans.b16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m16n8.x1.trans.b8.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 16, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b8>} : !llvm.ptr<3>, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m8n8.x2.b16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1, %r2 {layout = #nvvm.mma_layout<row>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m8n8.x2.trans.b16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1, %r2 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m16n8.x2.trans.b8.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1, %r2 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 16, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b8>} : !llvm.ptr<3>, i32, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m8n8.x4.b16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1, %r2, %r3, %r4 {layout = #nvvm.mma_layout<row>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32, i32, i32, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m8n8.x4.trans.b16.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1, %r2, %r3, %r4 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32, i32, i32, i32
+ // CHECK: call void @llvm.nvvm.stmatrix.sync.aligned.m16n8.x4.trans.b8.p3(ptr addrspace(3) %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
+ nvvm.stmatrix %arg0, %r1, %r2, %r3, %r4 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 16, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b8>} : !llvm.ptr<3>, i32, i32, i32, i32
+ llvm.return
+}
+
// This function has the "kernel" attribute attached and should appear in the
// NVVM annotations after conversion.
llvm.func @kernel_func() attributes {nvvm.kernel} {
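Note: a hedged reading of the intrinsic names checked above: x1/x2/x4 is the number of i32 operands, .trans appears for the col layout, and the m8n8/m16n8 shapes pair with b16/b8 element types, consistent with the verifier errors in nvvmir-invalid.mlir. For example:

  nvvm.stmatrix %arg0, %r1, %r2 {layout = #nvvm.mma_layout<col>, shape = #nvvm.ld_st_matrix_shape<m = 8, n = 8>, eltType = #nvvm.ld_st_matrix_elt_type<b16>} : !llvm.ptr<3>, i32, i32
  // -> llvm.nvvm.stmatrix.sync.aligned.m8n8.x2.trans.b16.p3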
diff --git a/mlir/test/Target/SPIRV/logical-ops.mlir b/mlir/test/Target/SPIRV/logical-ops.mlir
index b200871..05cbddc 100644
--- a/mlir/test/Target/SPIRV/logical-ops.mlir
+++ b/mlir/test/Target/SPIRV/logical-ops.mlir
@@ -84,6 +84,8 @@ spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], []> {
%15 = spirv.IsNan %arg0 : f32
// CHECK: spirv.IsInf
%16 = spirv.IsInf %arg1 : f32
+ // CHECK: spirv.IsFinite
+ %17 = spirv.IsFinite %arg0 : f32
spirv.Return
}
}
diff --git a/mlir/test/mlir-tblgen/op-properties-predicates.td b/mlir/test/mlir-tblgen/op-properties-predicates.td
index 7cd24aa..af09ee7 100644
--- a/mlir/test/mlir-tblgen/op-properties-predicates.td
+++ b/mlir/test/mlir-tblgen/op-properties-predicates.td
@@ -70,6 +70,12 @@ def OpWithPredicates : NS_Op<"op_with_predicates"> {
// CHECK-NEXT: if (!(((!prop.has_value())) || ((::llvm::all_of((*(prop)), [](const int64_t& baseStore) -> bool { return [](int64_t baseIface) -> bool { return ((baseIface >= 0)); }(baseStore); })) && (!(((*(prop)).empty()))))))
// CHECK: failed to satisfy constraint: optional non-empty array of non-negative int64_
+// CHECK-LABEL: ::llvm::LogicalResult OpWithPredicatesAdaptor::verify
+// Note: comprehensive emission of verifiers is tested in verifyInvariantsImpl() below
+// CHECK: int64_t tblgen_scalar = this->getScalar();
+// CHECK: if (!((tblgen_scalar >= 0)))
+// CHECK: return emitError(loc, "'test.op_with_predicates' op ""property 'scalar' failed to satisfy constraint: non-negative int64_t");
+
// CHECK-LABEL: OpWithPredicates::verifyInvariantsImpl()
// Note: for test readability, we capture [[maybe_unused]] into the variable maybe_unused
// CHECK: [[maybe_unused:\[\[maybe_unused\]\]]] int64_t tblgen_scalar = this->getScalar();
diff --git a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
index f35cfa6..8ea4eb7 100644
--- a/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
+++ b/mlir/tools/mlir-tblgen/OpDefinitionsGen.cpp
@@ -1127,7 +1127,7 @@ static void genPropertyVerifier(
body << formatv(fetchProperty, varName, getterName,
prop.prop.getInterfaceType());
auto uniquedFn = staticVerifierEmitter.getPropConstraintFn(prop.prop);
- if (uniquedFn.has_value())
+ if (uniquedFn.has_value() && emitHelper.isEmittingForOp())
body << formatv(verifyPropertyUniqued, *uniquedFn, varName, prop.name);
else
body << formatv(
@@ -4764,6 +4764,7 @@ void OpOperandAdaptorEmitter::addVerification() {
FmtContext verifyCtx;
populateSubstitutions(emitHelper, verifyCtx);
+ genPropertyVerifier(emitHelper, verifyCtx, body, staticVerifierEmitter);
genAttributeVerifier(emitHelper, verifyCtx, body, staticVerifierEmitter,
useProperties);
diff --git a/runtimes/CMakeLists.txt b/runtimes/CMakeLists.txt
index d0d2a53..bfb4341 100644
--- a/runtimes/CMakeLists.txt
+++ b/runtimes/CMakeLists.txt
@@ -36,7 +36,7 @@ list(INSERT CMAKE_MODULE_PATH 0
# We order libraries to mirror roughly how they are layered, except that compiler-rt can depend
# on libc++, so we put it after.
set(LLVM_DEFAULT_RUNTIMES "libc;libunwind;libcxxabi;pstl;libcxx;compiler-rt;libclc;openmp;offload")
-set(LLVM_SUPPORTED_RUNTIMES "${LLVM_DEFAULT_RUNTIMES};llvm-libgcc;flang-rt")
+set(LLVM_SUPPORTED_RUNTIMES "${LLVM_DEFAULT_RUNTIMES};llvm-libgcc;flang-rt;libsycl")
set(LLVM_ENABLE_RUNTIMES "" CACHE STRING
"Semicolon-separated list of runtimes to build, or \"all\" (${LLVM_DEFAULT_RUNTIMES}). Supported runtimes are ${LLVM_SUPPORTED_RUNTIMES}.")
if(LLVM_ENABLE_RUNTIMES STREQUAL "all" )
diff --git a/third-party/benchmark/src/cycleclock.h b/third-party/benchmark/src/cycleclock.h
index d4f1330..c0dffcf 100644
--- a/third-party/benchmark/src/cycleclock.h
+++ b/third-party/benchmark/src/cycleclock.h
@@ -79,7 +79,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
return ret;
-#elif defined(__x86_64__) || defined(__amd64__)
+#elif (defined(__x86_64__) || defined(__amd64__)) && !defined(__arm64ec__)
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
@@ -139,7 +139,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__arm64ec__)
// System timer of ARMv8 runs at a different frequency than the CPU's.
// The frequency is fixed, typically in the range 1-50MHz. It can be
// read at CNTFRQ special register. We assume the OS has set up
diff --git a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
index bd9641d..a1d5afd 100644
--- a/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/libc/BUILD.bazel
@@ -2275,6 +2275,16 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_math_atan2",
+ hdrs = ["src/__support/math/atan2.h"],
+ deps = [
+ ":__support_fputil_double_double",
+ ":__support_fputil_nearest_integer",
+ ":__support_math_atan_utils",
+ ],
+)
+
+libc_support_library(
name = "__support_math_atanf",
hdrs = ["src/__support/math/atanf.h"],
deps = [
@@ -2289,6 +2299,21 @@ libc_support_library(
)
libc_support_library(
+ name = "__support_math_atanf16",
+ hdrs = ["src/__support/math/atanf16.h"],
+ deps = [
+ ":__support_fputil_cast",
+ ":__support_fputil_except_value_utils",
+ ":__support_fputil_fenv_impl",
+ ":__support_fputil_fp_bits",
+ ":__support_fputil_polyeval",
+ ":__support_fputil_multiply_add",
+ ":__support_fputil_sqrt",
+ ":__support_macros_optimization",
+ ],
+)
+
+libc_support_library(
name = "__support_math_asinf",
hdrs = ["src/__support/math/asinf.h"],
deps = [
@@ -2905,6 +2930,13 @@ libc_math_function(
)
libc_math_function(
+ name = "atanf16",
+ additional_deps = [
+ ":__support_math_atanf16"
+ ],
+)
+
+libc_math_function(
name = "atan",
additional_deps = [
":__support_math_atan"
@@ -2923,9 +2955,7 @@ libc_math_function(
libc_math_function(
name = "atan2",
additional_deps = [
- ":__support_fputil_double_double",
- ":__support_fputil_nearest_integer",
- ":__support_math_atan_utils",
+ ":__support_math_atan2",
],
)
diff --git a/utils/bazel/llvm-project-overlay/libc/libc_configure_options.bzl b/utils/bazel/llvm-project-overlay/libc/libc_configure_options.bzl
index 96d7fa8..b49e7c3 100644
--- a/utils/bazel/llvm-project-overlay/libc/libc_configure_options.bzl
+++ b/utils/bazel/llvm-project-overlay/libc/libc_configure_options.bzl
@@ -46,4 +46,7 @@ LIBC_CONFIGURE_OPTIONS = [
# Documentation in libc/src/__support/libc_assert.h
# "LIBC_COPT_USE_C_ASSERT",
+
+ # Documentation in libc/docs/configure.rst
+ "LIBC_THREAD_MODE=LIBC_THREAD_MODE_PLATFORM",
]
diff --git a/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel b/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel
index 1db8244..e96fc03 100644
--- a/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/lldb/source/Plugins/BUILD.bazel
@@ -152,6 +152,7 @@ cc_library(
"//clang:lex",
"//clang:sema",
"//lldb:CoreHeaders",
+ "//lldb:ExpressionHeaders",
"//lldb:Host",
"//lldb:SymbolHeaders",
"//lldb:TargetHeaders",
@@ -335,6 +336,7 @@ cc_library(
"//lldb:SymbolHeaders",
"//lldb:TargetHeaders",
"//lldb:Utility",
+ "//lldb:UtilityPrivateHeaders",
"//llvm:BinaryFormat",
"//llvm:DebugInfoDWARF",
"//llvm:Demangle",
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index 118bf64..5930b85 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -1281,6 +1281,7 @@ cc_library(
":AnalysisFpExc",
":BinaryFormat",
":Core",
+ ":FrontendHLSL",
":Object",
":ProfileData",
":Support",
diff --git a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel
index 813fd0c..c5fe783 100644
--- a/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/unittests/BUILD.bazel
@@ -133,6 +133,7 @@ cc_test(
"//llvm:AsmParser",
"//llvm:CodeGen",
"//llvm:Core",
+ "//llvm:FrontendHLSL",
"//llvm:Instrumentation",
"//llvm:Passes",
"//llvm:Support",